function_name
stringlengths 1
63
| docstring
stringlengths 50
5.89k
| masked_code
stringlengths 50
882k
| implementation
stringlengths 169
12.9k
| start_line
int32 1
14.6k
| end_line
int32 16
14.6k
| file_content
stringlengths 274
882k
|
|---|---|---|---|---|---|---|
configure_app
|
:param project: should represent the canonical name for the project, generally
the same name it assigned in distutils.
:param default_config_path: the default location for the configuration file.
:param default_settings: default settings to load (think inheritance).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
|
"""
logan.runner
~~~~~~~~~~~~
:copyright: (c) 2012 David Cramer.
:license: Apache License 2.0, see NOTICE for more details.
"""
import argparse
import os
import re
import sys
from django.core import management
from nautobot import __version__
from . import importer
from .settings import create_default_settings
__configured = False
def sanitize_name(project):
    """Return *project* with spaces and any disallowed characters replaced by dashes."""
    dashed = project.replace(" ", "-")
    # Anything outside [A-Za-z0-9_-] also becomes a dash.
    return re.sub(r"[^A-Z0-9a-z_-]", "-", dashed)
def parse_command_args(args):
    """
    Split ``args`` into a tuple ``(args, command, command_args)``.

    For example, "--config=bar start --with=baz" would return:
    (['--config=bar'], 'start', ['--with=baz'])
    """
    # Locate the first token that does not look like an option flag.
    split_at = next(
        (pos for pos, token in enumerate(args) if not token.startswith("-")),
        None,
    )
    # No positional token: everything is a global argument, no command.
    if split_at is None:
        return (args, None, [])
    return (args[:split_at], args[split_at], args[split_at + 1:])
def is_configured():
    """Return True once ``configure_app`` has successfully installed settings."""
    # Reading a module-level name requires no ``global`` declaration;
    # the original's ``global __configured`` was needless for a pure read.
    return __configured
# MASKED: configure_app function (lines 57-156)
class VerboseHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    """Argparse Formatter that includes newlines and shows argument defaults.

    Combines RawDescriptionHelpFormatter (preserves newlines in the parser
    description) with ArgumentDefaultsHelpFormatter (appends each argument's
    default value to its help text).
    """
def run_app(**kwargs):
    """Entry point for the CLI runner.

    Parses ``--config``/``--version`` and the reserved ``init`` subcommand
    itself, then hands any other command off to Django's management machinery
    after calling ``configure_app`` with the remaining ``kwargs``.

    Expected ``kwargs`` (consumed here or forwarded to ``configure_app``):
    ``description`` (required, popped), ``default_config_path``,
    ``settings_initializer``, and anything ``configure_app`` accepts.
    """
    sys_args = sys.argv
    # The established command for running this program
    runner_name = os.path.basename(sys_args[0])
    default_config_path = kwargs.get("default_config_path")
    # Primary parser
    parser = management.CommandParser(
        description=kwargs.pop("description"),
        formatter_class=VerboseHelpFormatter,
        add_help=False,
    )
    parser.add_argument(
        "-c",
        "--config",
        metavar="CONFIG",
        help="Path to the configuration file",
    )
    parser.add_argument(
        "--version",
        action="version",
        version=__version__,
    )
    # This block of code here is done in this way because of the built in Django
    # management command parsing not playing well unless you have a Django
    # config with SECRET_KEY defined.
    # Parse out `--config` here first capturing any unparsed args for passing to
    # Django parser.
    args, unparsed_args = parser.parse_known_args()
    # Now add the sub-parser for "init" command
    subparsers = parser.add_subparsers(help=False, dest="command", metavar="")
    init_parser = subparsers.add_parser(
        "init",
        help="Initialize a new configuration",
    )
    init_parser.add_argument(
        "config_path",
        default=default_config_path,
        nargs="?",
        help="Path to output generated configuration file",
    )
    # Try to use our parser first, to process custom arguments
    try:
        args = parser.parse_args()
        command = args.command
        command_args = sys.argv[1:]
    # Fallback to passing through to Django management commands
    # except RuntimeError as err:
    except management.CommandError as err:
        # Only "invalid choice" means the command belongs to Django; anything
        # else is a genuine parse error and should propagate.
        if "invalid choice" not in str(err):
            raise
        # Rewrite sys_args to have the unparsed args (if any)
        sys_args = sys_args[:1] + unparsed_args
        _, command, command_args = parse_command_args(sys_args[1:])
    # If we don't get a command of some sort, print help and exit dirty
    if not command:
        parser.print_help()
        parser.exit(1)
    # The `init` command is reserved for initializing configuration
    if command == "init":
        settings_initializer = kwargs.get("settings_initializer")
        config_path = os.path.expanduser(args.config_path)
        # Check if the config already exists; alert user and exit if exists.
        if os.path.exists(config_path):
            print(
                f"A configuration already exists at {config_path}. Please backup and remove it or choose another path."
            )
            return
        # Create the config
        try:
            create_default_settings(config_path, settings_initializer)
        except OSError as e:
            # NOTE(review): re-raising via e.__class__ drops the original
            # traceback context; consider ``raise ... from e``.
            raise e.__class__("Unable to write default settings file to %r" % config_path)
        print("Configuration file created at %r" % config_path)
        return
    # Fetch config path from `--config` if provided, otherwise we want it to
    # default to None so that the underlying machinery in `configure_app` will
    # process default path or environment variable.
    config_path = args.config
    # Overlay our config w/ defaults
    try:
        configure_app(config_path=config_path, **kwargs)
    except ValueError as err:
        parser.exit(status=2, message=str(err) + "\n")
    # Call Django management command
    management.execute_from_command_line([runner_name, command] + command_args)
    # Exit cleanly
    sys.exit(0)
if __name__ == "__main__":
run_app()
|
def configure_app(
    config_path=None,
    project=None,
    default_config_path=None,
    default_settings=None,
    settings_initializer=None,
    settings_envvar=None,
    initializer=None,
    allow_extras=True,
    config_module_name=None,
    runner_name=None,
    on_configure=None,
):
    """
    Locate the configuration file, install it as the Django settings module,
    and run the optional configuration callbacks.

    :param config_path: explicit path to the configuration file; when falsy,
        the default path (or the ``settings_envvar`` override) is used.
    :param project: should represent the canonical name for the project, generally
        the same name it assigned in distutils.
    :param default_config_path: the default location for the configuration file.
    :param default_settings: default settings to load (think inheritance).
    :param settings_initializer: a callback function which should return a string
        representing the default settings template to generate.
        NOTE(review): accepted but unused here — it is consumed by the
        ``init`` command in ``run_app``.
    :param settings_envvar: environment variable that overrides the default
        config path when set (defaults to ``<PROJECT>_CONF``).
    :param initializer: a callback function which will be executed before the command
        is executed. It is passed a dictionary of various configuration attributes.
    :param allow_extras: forwarded to ``importer.install``.
    :param config_module_name: module name under which the configuration is
        installed (defaults to ``<project>_config``).
    :param runner_name: CLI runner name; only used to build a friendlier
        "run init" error message when the config file is missing.
    :param on_configure: callback invoked after settings are fully installed.
    :raises ValueError: if the resolved configuration file does not exist.
    """
    global __configured
    project_filename = sanitize_name(project)
    # Derive defaults from the sanitized project name.
    if default_config_path is None:
        default_config_path = "~/%s/%s.conf.py" % (project_filename, project_filename)
    if settings_envvar is None:
        settings_envvar = project_filename.upper() + "_CONF"
    if config_module_name is None:
        config_module_name = project_filename + "_config"
    # normalize path; the environment variable takes precedence over the
    # built-in default (and is used verbatim, without normalization).
    if settings_envvar in os.environ:
        default_config_path = os.environ.get(settings_envvar)
    else:
        default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))
    if not config_path:
        config_path = default_config_path
    config_path = os.path.expanduser(config_path)
    if not os.path.exists(config_path):
        if runner_name:
            raise ValueError(
                "Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,)
            )
        raise ValueError("Configuration file does not exist at %r" % (config_path,))
    # Point Django at the synthetic module that importer.install() provides.
    os.environ["DJANGO_SETTINGS_MODULE"] = config_module_name

    def settings_callback(settings):
        # Run the caller-supplied initializer once settings have loaded.
        if initializer is None:
            return
        try:
            initializer(
                {
                    "project": project,
                    "config_path": config_path,
                    "settings": settings,
                }
            )
        except Exception:
            # XXX: Django doesn't like various errors in this path
            import sys
            import traceback

            traceback.print_exc()
            sys.exit(1)

    importer.install(
        config_module_name,
        config_path,
        default_settings,
        allow_extras=allow_extras,
        callback=settings_callback,
    )
    __configured = True
    # HACK(dcramer): we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings

    hasattr(settings, "INSTALLED_APPS")
    if on_configure:
        on_configure(
            {
                "project": project,
                "config_path": config_path,
                "settings": settings,
            }
        )
| 57
| 156
|
"""
logan.runner
~~~~~~~~~~~~
:copyright: (c) 2012 David Cramer.
:license: Apache License 2.0, see NOTICE for more details.
"""
import argparse
import os
import re
import sys
from django.core import management
from nautobot import __version__
from . import importer
from .settings import create_default_settings
__configured = False
def sanitize_name(project):
project = project.replace(" ", "-")
return re.sub("[^A-Z0-9a-z_-]", "-", project)
def parse_command_args(args):
"""
This parses the arguments and returns a tuple containing:
(args, command, command_args)
For example, "--config=bar start --with=baz" would return:
(['--config=bar'], 'start', ['--with=baz'])
"""
index = None
for arg_i, arg in enumerate(args):
if not arg.startswith("-"):
index = arg_i
break
# Unable to parse any arguments
if index is None:
return (args, None, [])
return (args[:index], args[index], args[(index + 1) :])
def is_configured():
global __configured
return __configured
def configure_app(
config_path=None,
project=None,
default_config_path=None,
default_settings=None,
settings_initializer=None,
settings_envvar=None,
initializer=None,
allow_extras=True,
config_module_name=None,
runner_name=None,
on_configure=None,
):
"""
:param project: should represent the canonical name for the project, generally
the same name it assigned in distutils.
:param default_config_path: the default location for the configuration file.
:param default_settings: default settings to load (think inheritence).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
"""
global __configured
project_filename = sanitize_name(project)
if default_config_path is None:
default_config_path = "~/%s/%s.conf.py" % (project_filename, project_filename)
if settings_envvar is None:
settings_envvar = project_filename.upper() + "_CONF"
if config_module_name is None:
config_module_name = project_filename + "_config"
# normalize path
if settings_envvar in os.environ:
default_config_path = os.environ.get(settings_envvar)
else:
default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))
if not config_path:
config_path = default_config_path
config_path = os.path.expanduser(config_path)
if not os.path.exists(config_path):
if runner_name:
raise ValueError(
"Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,)
)
raise ValueError("Configuration file does not exist at %r" % (config_path,))
os.environ["DJANGO_SETTINGS_MODULE"] = config_module_name
def settings_callback(settings):
if initializer is None:
return
try:
initializer(
{
"project": project,
"config_path": config_path,
"settings": settings,
}
)
except Exception:
# XXX: Django doesn't like various errors in this path
import sys
import traceback
traceback.print_exc()
sys.exit(1)
importer.install(
config_module_name,
config_path,
default_settings,
allow_extras=allow_extras,
callback=settings_callback,
)
__configured = True
# HACK(dcramer): we need to force access of django.conf.settings to
# ensure we don't hit any import-driven recursive behavior
from django.conf import settings
hasattr(settings, "INSTALLED_APPS")
if on_configure:
on_configure(
{
"project": project,
"config_path": config_path,
"settings": settings,
}
)
class VerboseHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
"""Argparse Formatter that includes newlines and shows argument defaults."""
def run_app(**kwargs):
sys_args = sys.argv
# The established command for running this program
runner_name = os.path.basename(sys_args[0])
default_config_path = kwargs.get("default_config_path")
# Primary parser
parser = management.CommandParser(
description=kwargs.pop("description"),
formatter_class=VerboseHelpFormatter,
add_help=False,
)
parser.add_argument(
"-c",
"--config",
metavar="CONFIG",
help="Path to the configuration file",
)
parser.add_argument(
"--version",
action="version",
version=__version__,
)
# This block of code here is done in this way because of the built in Django
# management command parsing not playing well unless you have a Django
# config with SECRET_KEY defined.
# Parse out `--config` here first capturing any unparsed args for passing to
# Django parser.
args, unparsed_args = parser.parse_known_args()
# Now add the sub-parser for "init" command
subparsers = parser.add_subparsers(help=False, dest="command", metavar="")
init_parser = subparsers.add_parser(
"init",
help="Initialize a new configuration",
)
init_parser.add_argument(
"config_path",
default=default_config_path,
nargs="?",
help="Path to output generated configuration file",
)
# Try to use our parser first, to process custom arguments
try:
args = parser.parse_args()
command = args.command
command_args = sys.argv[1:]
# Fallback to passing through to Django management commands
# except RuntimeError as err:
except management.CommandError as err:
if "invalid choice" not in str(err):
raise
# Rewrite sys_args to have the unparsed args (if any)
sys_args = sys_args[:1] + unparsed_args
_, command, command_args = parse_command_args(sys_args[1:])
# If we don't get a command of some sort, print help and exit dirty
if not command:
parser.print_help()
parser.exit(1)
# The `init` command is reserved for initializing configuration
if command == "init":
settings_initializer = kwargs.get("settings_initializer")
config_path = os.path.expanduser(args.config_path)
# Check if the config already exists; alert user and exit if exists.
if os.path.exists(config_path):
print(
f"A configuration already exists at {config_path}. Please backup and remove it or choose another path."
)
return
# Create the config
try:
create_default_settings(config_path, settings_initializer)
except OSError as e:
raise e.__class__("Unable to write default settings file to %r" % config_path)
print("Configuration file created at %r" % config_path)
return
# Fetch config path from `--config` if provided, otherwise we want it to
# default to None so that the underlying machinery in `configure_app` will
# process default path or environment variable.
config_path = args.config
# Overlay our config w/ defautls
try:
configure_app(config_path=config_path, **kwargs)
except ValueError as err:
parser.exit(status=2, message=str(err) + "\n")
# Call Django management command
management.execute_from_command_line([runner_name, command] + command_args)
# Exit cleanly
sys.exit(0)
if __name__ == "__main__":
run_app()
|
basic_tokenize
|
The function is used to do some basic tokenization
Args:
input_str: The input string
lang: Language of the input string
Return: a list of tokens of the input string
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from copy import deepcopy
from nltk import word_tokenize
from tqdm import tqdm
import nemo.collections.nlp.data.text_normalization.constants as constants
__all__ = ['read_data_file', 'normalize_str']
def read_data_file(fp):
    """ Reading the raw data from a file of NeMo format
    For more info about the data format, refer to the
    `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
    """
    insts = []
    cur_classes, cur_w_words, cur_s_words = [], [], []
    # Read input file
    with open(fp, 'r', encoding='utf-8') as f:
        for line in tqdm(f):
            fields = [field.strip() for field in line.strip().split('\t')]
            if fields[0] == '<eos>':
                # Sentence boundary: snapshot the accumulated columns.
                insts.append((deepcopy(cur_classes), deepcopy(cur_w_words), deepcopy(cur_s_words)))
                cur_classes, cur_w_words, cur_s_words = [], [], []
            else:
                cur_classes.append(fields[0])
                cur_w_words.append(fields[1])
                cur_s_words.append(fields[2])
    return insts
def normalize_str(input_str, lang):
    """ Normalize an input string

    Lowercases and strips the input, tokenizes it with ``basic_tokenize``,
    then rejoins the tokens with single spaces.
    """
    input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
    input_str = ' '.join(input_str_tokens)
    # Collapse double spaces that joining empty tokens can introduce.
    input_str = input_str.replace('  ', ' ')
    return input_str
def remove_puncts(input_str):
    """ Remove punctuations from an input string """
    # One C-level pass deleting every char in string.punctuation.
    deletion_table = str.maketrans('', '', string.punctuation)
    return input_str.translate(deletion_table)
# MASKED: basic_tokenize function (lines 60-71)
|
def basic_tokenize(input_str, lang):
    """
    The function is used to do some basic tokenization

    Args:
        input_str: The input string
        lang: Language of the input string
    Return: a list of tokens of the input string
    """
    # Non-English text falls back to simple whitespace splitting.
    if lang != constants.ENGLISH:
        return input_str.strip().split(' ')
    return word_tokenize(input_str)
| 60
| 71
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from copy import deepcopy
from nltk import word_tokenize
from tqdm import tqdm
import nemo.collections.nlp.data.text_normalization.constants as constants
__all__ = ['read_data_file', 'normalize_str']
def read_data_file(fp):
""" Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
"""
insts, w_words, s_words, classes = [], [], [], []
# Read input file
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in line.strip().split('\t')]
if es[0] == '<eos>':
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
# Reset
w_words, s_words, classes = [], [], []
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
def normalize_str(input_str, lang):
""" Normalize an input string """
input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
input_str = ' '.join(input_str_tokens)
input_str = input_str.replace(' ', ' ')
return input_str
def remove_puncts(input_str):
""" Remove punctuations from an input string """
return input_str.translate(str.maketrans('', '', string.punctuation))
def basic_tokenize(input_str, lang):
"""
The function is used to do some basic tokenization
Args:
input_str: The input string
lang: Language of the input string
Return: a list of tokens of the input string
"""
if lang == constants.ENGLISH:
return word_tokenize(input_str)
return input_str.strip().split(' ')
|
_escapify
|
Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
    """Convert a binary string into its hex encoding, broken up into chunks
    of I{chunksize} characters separated by a space.
    @param data: the binary string
    @type data: string
    @param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
    @rtype: string
    """
    encoded = binascii.hexlify(data)
    chunks = [encoded[pos:pos + chunksize]
              for pos in range(0, len(encoded), chunksize)]
    return b' '.join(chunks).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
    """Convert a binary string into its base64 encoding, broken up into chunks
    of I{chunksize} characters separated by a space.
    @param data: the binary string
    @type data: string
    @param chunksize: the chunk size. Default is
    L{dns.rdata._base64_chunksize}
    @rtype: string
    """
    encoded = base64.b64encode(data)
    chunks = [encoded[pos:pos + chunksize]
              for pos in range(0, len(encoded), chunksize)]
    return b' '.join(chunks).decode()
__escaped = bytearray(b'"\\')
# MASKED: _escapify function (lines 79-101)
def _truncate_bitmap(what):
    """Determine the index of greatest byte that isn't all zeros, and
    return the bitmap that contains all the bytes less than that index.
    @param what: a string of octets representing a bitmap.
    @type what: string
    @rtype: string
    """
    # Scan from the tail for the last non-zero octet.
    for idx in xrange(len(what) - 1, -1, -1):
        if what[idx] != 0:
            return what[0:idx + 1]
    # All zero: keep a single octet.
    return what[0:1]
class Rdata(object):
    """Base class for all DNS rdata types.
    """

    # Subclasses extend __slots__ with their own wire-format fields.
    __slots__ = ['rdclass', 'rdtype']

    def __init__(self, rdclass, rdtype):
        """Initialize an rdata.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        """
        self.rdclass = rdclass
        self.rdtype = rdtype

    def covers(self):
        """DNS SIG/RRSIG rdatas apply to a specific type; this type is
        returned by the covers() function. If the rdata type is not
        SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
        creating rdatasets, allowing the rdataset to contain only RRSIGs
        of a particular type, e.g. RRSIG(NS).
        @rtype: int
        """
        return dns.rdatatype.NONE

    def extended_rdatatype(self):
        """Return a 32-bit type value, the least significant 16 bits of
        which are the ordinary DNS type, and the upper 16 bits of which are
        the "covered" type, if any.
        @rtype: int
        """
        return self.covers() << 16 | self.rdtype

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert an rdata to text format.
        @rtype: string
        """
        raise NotImplementedError

    def to_wire(self, file, compress=None, origin=None):
        """Convert an rdata to wire format.
        @rtype: string
        """
        raise NotImplementedError

    def to_digestable(self, origin=None):
        """Convert rdata to a format suitable for digesting in hashes. This
        is also the DNSSEC canonical form."""
        # Wire format with no name compression (compress=None) is canonical.
        f = BytesIO()
        self.to_wire(f, None, origin)
        return f.getvalue()

    def validate(self):
        """Check that the current contents of the rdata's fields are
        valid. If you change an rdata by assigning to its fields,
        it is a good idea to call validate() when you are done making
        changes.
        """
        # Round-trip through text format; the parser raises on bad fields.
        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())

    def __repr__(self):
        covers = self.covers()
        if covers == dns.rdatatype.NONE:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
               str(self) + '>'

    def __str__(self):
        return self.to_text()

    def _cmp(self, other):
        """Compare an rdata with another rdata of the same rdtype and
        rdclass. Return < 0 if self < other in the DNSSEC ordering,
        0 if self == other, and > 0 if self > other.
        """
        # DNSSEC ordering is a lexicographic comparison of the canonical
        # wire forms (names fully qualified against the root).
        our = self.to_digestable(dns.name.root)
        their = other.to_digestable(dns.name.root)
        if our == their:
            return 0
        if our > their:
            return 1
        return -1

    # Comparisons are only meaningful between rdatas of the same class and
    # type; mismatches are unequal (or NotImplemented for orderings).
    def __eq__(self, other):
        if not isinstance(other, Rdata):
            return False
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return False
        return self._cmp(other) == 0

    def __ne__(self, other):
        if not isinstance(other, Rdata):
            return True
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return True
        return self._cmp(other) != 0

    def __lt__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) > 0

    def __hash__(self):
        # Hash the canonical wire form so equal rdatas hash equally.
        return hash(self.to_digestable(dns.name.root))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Build an rdata object from text format.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param tok: The tokenizer
        @type tok: dns.tokenizer.Tokenizer
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @param relativize: should names be relativized?
        @type relativize: bool
        @rtype: dns.rdata.Rdata instance
        """
        raise NotImplementedError

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Build an rdata object from wire format
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param rdlen: The length of the wire-format rdata
        @type rdlen: int
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @rtype: dns.rdata.Rdata instance
        """
        raise NotImplementedError

    def choose_relativity(self, origin=None, relativize=True):
        """Convert any domain names in the rdata to the specified
        relativization.
        """
        # Base rdatas contain no names; subclasses with names override this.
        pass
class GenericRdata(Rdata):
    """Generate Rdata Class
    This class is used for rdata types for which we have no better
    implementation. It implements the DNS "unknown RRs" scheme.
    """

    # Raw wire-format payload of the record.
    __slots__ = ['data']

    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        self.data = data

    def to_text(self, origin=None, relativize=True, **kw):
        # Generic "unknown RR" presentation: \# <length> <hex-of-data>.
        return r'\# %d ' % len(self.data) + _hexify(self.data)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse the generic \\# <length> <hex> presentation format.
        @raises dns.exception.SyntaxError: if the leading \\# marker is
        missing or the hex payload length disagrees with the declared length.
        """
        token = tok.get()
        if not token.is_identifier() or token.value != r'\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        length = tok.get_int()
        chunks = []
        # Hex data may be split across several whitespace-separated tokens.
        while 1:
            token = tok.get()
            if token.is_eol_or_eof():
                break
            chunks.append(token.value.encode())
        hex = b''.join(chunks)
        data = binascii.unhexlify(hex)
        if len(data) != length:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, data)

    def to_wire(self, file, compress=None, origin=None):
        # The stored payload is already wire format.
        file.write(self.data)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
    """Return the class implementing (rdclass, rdtype).

    Looks in the _rdata_modules cache first, then tries to import
    ``<_module_prefix>.<CLASS>.<TYPE>`` and, failing that,
    ``<_module_prefix>.ANY.<TYPE>``. Falls back to GenericRdata when no
    specific implementation exists.
    """
    def import_module(name):
        # __import__ returns the top-level package; walk down to the leaf.
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    # Type mnemonics may contain '-', which is not valid in module names.
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # NOTE(review): the cache is probed with dns.rdatatype.ANY but filled
        # below with dns.rdataclass.ANY; both constants are 255, so the keys
        # coincide — confirm before changing either constant.
        mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
        if not mod:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              rdclass_text, rdtype_text]))
                _rdata_modules[(rdclass, rdtype)] = mod
            except ImportError:
                try:
                    mod = import_module('.'.join([_module_prefix,
                                                  'ANY', rdtype_text]))
                    _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
                except ImportError:
                    mod = None
    if mod:
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text format.
    This function attempts to dynamically load a class which
    implements the specified rdata class and type. If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.
    Once a class is chosen, its from_text() class method is called
    with the parameters to this function.
    If I{tok} is a string, then a tokenizer is created and the string
    is used as its input.
    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param tok: The tokenizer or input text
    @type tok: dns.tokenizer.Tokenizer or string
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @param relativize: Should names be relativized?
    @type relativize: bool
    @rtype: dns.rdata.Rdata instance"""
    if isinstance(tok, string_types):
        tok = dns.tokenizer.Tokenizer(tok)
    cls = get_rdata_class(rdclass, rdtype)
    if cls != GenericRdata:
        # peek at first token
        token = tok.get()
        tok.unget(token)
        if token.is_identifier() and \
                token.value == r'\#':
            #
            # Known type using the generic syntax. Extract the
            # wire form from the generic syntax, and then run
            # from_wire on it.
            #
            rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                           relativize)
            return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
                             origin)
    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format
    This function attempts to dynamically load a class which
    implements the specified rdata class and type. If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.
    Once a class is chosen, its from_wire() class method is called
    with the parameters to this function.
    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param rdlen: The length of the wire-format rdata
    @type rdlen: int
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @rtype: dns.rdata.Rdata instance"""
    # Wrap raw bytes in WireData so slicing raises dns exceptions on
    # out-of-bounds access instead of silently truncating.
    wire = dns.wiredata.maybe_wrap(wire)
    cls = get_rdata_class(rdclass, rdtype)
    return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
|
def _escapify(qstring):
    """Escape the characters in a quoted string which need it.
    @param qstring: the string
    @type qstring: string
    @returns: the escaped string
    @rtype: string
    """
    # Normalize the input to a bytearray so iteration yields ints.
    if isinstance(qstring, text_type):
        qstring = qstring.encode()
    if not isinstance(qstring, bytearray):
        qstring = bytearray(qstring)

    pieces = []
    for byte in qstring:
        if byte in __escaped:
            # Quote and backslash get a leading backslash.
            pieces.append('\\' + chr(byte))
        elif 0x20 <= byte < 0x7F:
            # Printable ASCII passes through unchanged.
            pieces.append(chr(byte))
        else:
            # Everything else becomes a decimal \DDD escape.
            pieces.append('\\%03d' % byte)
    return ''.join(pieces)
| 79
| 101
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
    """Return *qstring* with quote/backslash characters backslash-escaped
    and all other non-printable octets written as \\DDD decimal escapes.

    @param qstring: the string
    @type qstring: string
    @returns: the escaped string
    @rtype: string
    """
    if isinstance(qstring, text_type):
        qstring = qstring.encode()
    if not isinstance(qstring, bytearray):
        qstring = bytearray(qstring)
    out = []
    for byte in qstring:
        if byte in __escaped:
            out.append('\\')
            out.append(chr(byte))
        elif 0x20 <= byte < 0x7F:
            out.append(chr(byte))
        else:
            out.append('\\%03d' % byte)
    return ''.join(out)
def _truncate_bitmap(what):
    """Strip trailing all-zero bytes from a bitmap.

    Returns the prefix of *what* ending at the highest non-zero byte;
    an all-zero bitmap collapses to its first byte.

    @param what: a string of octets representing a bitmap.
    @type what: string
    @rtype: string
    """
    highest = 0
    for pos, octet in enumerate(what):
        if octet != 0:
            highest = pos
    return what[0: highest + 1]
class Rdata(object):
    """Base class for all DNS rdata types.
    """
    # Restrict instances to these two attributes; rdata objects are
    # created in bulk when parsing zones/messages, so avoiding a
    # per-instance __dict__ saves memory.
    __slots__ = ['rdclass', 'rdtype']
    def __init__(self, rdclass, rdtype):
        """Initialize an rdata.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        """
        self.rdclass = rdclass
        self.rdtype = rdtype
    def covers(self):
        """DNS SIG/RRSIG rdatas apply to a specific type; this type is
        returned by the covers() function. If the rdata type is not
        SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
        creating rdatasets, allowing the rdataset to contain only RRSIGs
        of a particular type, e.g. RRSIG(NS).
        @rtype: int
        """
        return dns.rdatatype.NONE
    def extended_rdatatype(self):
        """Return a 32-bit type value, the least significant 16 bits of
        which are the ordinary DNS type, and the upper 16 bits of which are
        the "covered" type, if any.
        @rtype: int
        """
        return self.covers() << 16 | self.rdtype
    def to_text(self, origin=None, relativize=True, **kw):
        """Convert an rdata to text format.
        @rtype: string
        """
        # Abstract; concrete rdata types must override.
        raise NotImplementedError
    def to_wire(self, file, compress=None, origin=None):
        """Convert an rdata to wire format.
        @rtype: string
        """
        # Abstract; concrete rdata types must override.
        raise NotImplementedError
    def to_digestable(self, origin=None):
        """Convert rdata to a format suitable for digesting in hashes. This
        is also the DNSSEC canonical form."""
        # Wire format with name compression disabled (compress=None) is
        # the DNSSEC canonical representation.
        f = BytesIO()
        self.to_wire(f, None, origin)
        return f.getvalue()
    def validate(self):
        """Check that the current contents of the rdata's fields are
        valid. If you change an rdata by assigning to its fields,
        it is a good idea to call validate() when you are done making
        changes.
        """
        # Round-trip through the text form; invalid field values will
        # raise while re-parsing.
        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
    def __repr__(self):
        covers = self.covers()
        if covers == dns.rdatatype.NONE:
            ctext = ''
        else:
            # e.g. an RRSIG covering NS renders as "RRSIG(NS)".
            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
            dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
            str(self) + '>'
    def __str__(self):
        return self.to_text()
    def _cmp(self, other):
        """Compare an rdata with another rdata of the same rdtype and
        rdclass. Return < 0 if self < other in the DNSSEC ordering,
        0 if self == other, and > 0 if self > other.
        """
        # Compare the DNSSEC canonical wire forms, absolutized at the
        # root so relative names cannot affect the ordering.
        our = self.to_digestable(dns.name.root)
        their = other.to_digestable(dns.name.root)
        if our == their:
            return 0
        if our > their:
            return 1
        return -1
    def __eq__(self, other):
        # Rdatas of different class or type are never equal.
        if not isinstance(other, Rdata):
            return False
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return False
        return self._cmp(other) == 0
    def __ne__(self, other):
        if not isinstance(other, Rdata):
            return True
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return True
        return self._cmp(other) != 0
    def __lt__(self, other):
        # Ordering is only defined between rdatas of the same
        # class/type; otherwise defer to the other operand.
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) < 0
    def __le__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) <= 0
    def __ge__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) >= 0
    def __gt__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) > 0
    def __hash__(self):
        # Hash the canonical wire form so rdatas that compare equal also
        # hash equal (consistent with __eq__).
        return hash(self.to_digestable(dns.name.root))
    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Build an rdata object from text format.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param tok: The tokenizer
        @type tok: dns.tokenizer.Tokenizer
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @param relativize: should names be relativized?
        @type relativize: bool
        @rtype: dns.rdata.Rdata instance
        """
        # Abstract; concrete rdata types must override.
        raise NotImplementedError
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Build an rdata object from wire format
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param rdlen: The length of the wire-format rdata
        @type rdlen: int
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @rtype: dns.rdata.Rdata instance
        """
        # Abstract; concrete rdata types must override.
        raise NotImplementedError
    def choose_relativity(self, origin=None, relativize=True):
        """Convert any domain names in the rdata to the specified
        relativization.
        """
        # The base rdata holds no names; subclasses containing names
        # override this.
        pass
class GenericRdata(Rdata):
    """Fallback rdata implementation using the RFC 3597 "unknown RRs"
    generic encoding; used whenever no class/type-specific
    implementation is available.
    """
    __slots__ = ['data']
    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        # Raw, uninterpreted rdata octets.
        self.data = data
    def to_text(self, origin=None, relativize=True, **kw):
        # Generic presentation form: backslash-hash, octet count, hex.
        return r'\# %d ' % len(self.data) + _hexify(self.data)
    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        lead = tok.get()
        if not lead.is_identifier() or lead.value != r'\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        expected = tok.get_int()
        pieces = []
        while True:
            item = tok.get()
            if item.is_eol_or_eof():
                break
            pieces.append(item.value.encode())
        payload = binascii.unhexlify(b''.join(pieces))
        if len(payload) != expected:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, payload)
    def to_wire(self, file, compress=None, origin=None):
        # Already in wire format; write through verbatim.
        file.write(self.data)
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current: current + rdlen])
# Cache of resolved implementation modules, keyed by (rdclass, rdtype);
# class-agnostic implementations are cached under (dns.rdatatype.ANY, rdtype).
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
    """Return the class implementing the given (rdclass, rdtype) pair.

    Dynamically imports dns.rdtypes.<class>.<type> or, failing that,
    dns.rdtypes.ANY.<type>, caching the resolved module in
    _rdata_modules. Falls back to GenericRdata when no specific
    implementation exists.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @rtype: class
    """
    def import_module(name):
        # __import__ returns the top-level package; walk down to the leaf.
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod
    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    # Mnemonics like "NSEC3-PARAM" are not valid module names.
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
        if not mod:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              rdclass_text, rdtype_text]))
                _rdata_modules[(rdclass, rdtype)] = mod
            except ImportError:
                try:
                    mod = import_module('.'.join([_module_prefix,
                                                  'ANY', rdtype_text]))
                    # Fix: cache under dns.rdatatype.ANY, the same key the
                    # lookup above uses.  Previously this stored under
                    # dns.rdataclass.ANY, which only worked because both
                    # constants happen to equal 255.
                    _rdata_modules[(dns.rdatatype.ANY, rdtype)] = mod
                except ImportError:
                    mod = None
    if mod:
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text format.
    This function attempts to dynamically load a class which
    implements the specified rdata class and type. If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.
    Once a class is chosen, its from_text() class method is called
    with the parameters to this function.
    If I{tok} is a string, then a tokenizer is created and the string
    is used as its input.
    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param tok: The tokenizer or input text
    @type tok: dns.tokenizer.Tokenizer or string
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @param relativize: Should names be relativized?
    @type relativize: bool
    @rtype: dns.rdata.Rdata instance"""
    if isinstance(tok, string_types):
        # Accept plain text for convenience; wrap it in a tokenizer.
        tok = dns.tokenizer.Tokenizer(tok)
    cls = get_rdata_class(rdclass, rdtype)
    if cls != GenericRdata:
        # peek at first token (get then push back, so the chosen class's
        # from_text() still sees the full token stream)
        token = tok.get()
        tok.unget(token)
        if token.is_identifier() and \
           token.value == r'\#':
            #
            # Known type using the generic syntax. Extract the
            # wire form from the generic syntax, and then run
            # from_wire on it.
            #
            rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                           relativize)
            return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
                             origin)
    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format.

    Dynamically resolves the class implementing (rdclass, rdtype) --
    falling back to GenericRdata -- and delegates to its from_wire()
    class method.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param rdlen: The length of the wire-format rdata
    @type rdlen: int
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @rtype: dns.rdata.Rdata instance"""
    implementation = get_rdata_class(rdclass, rdtype)
    wrapped = dns.wiredata.maybe_wrap(wire)
    return implementation.from_wire(rdclass, rdtype, wrapped, current,
                                    rdlen, origin)
|
_truncate_bitmap
|
Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
# MASKED: _truncate_bitmap function (lines 104-116)
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
class GenericRdata(Rdata):
"""Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
token = tok.get()
if not token.is_identifier() or token.value != r'\#':
raise dns.exception.SyntaxError(
r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value.encode())
hex = b''.join(chunks)
data = binascii.unhexlify(hex)
if len(data) != length:
raise dns.exception.SyntaxError(
'generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
def to_wire(self, file, compress=None, origin=None):
file.write(self.data)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If I{tok} is a string, then a tokenizer is created and the string
is used as its input.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer or input text
@type tok: dns.tokenizer.Tokenizer or string
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance"""
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance"""
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
|
def _truncate_bitmap(what):
    """Trim trailing all-zero bytes from a bitmap, returning the prefix
    that ends at the highest non-zero byte (or the first byte alone when
    the bitmap is entirely zero).

    @param what: a string of octets representing a bitmap.
    @type what: string
    @rtype: string
    """
    highest = 0
    for pos, octet in enumerate(what):
        if octet != 0:
            highest = pos
    return what[0: highest + 1]
| 104
| 116
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Build an rdata object from wire format

        Abstract; concrete rdata implementations must override this.

        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param rdlen: The length of the wire-format rdata
        @type rdlen: int
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @rtype: dns.rdata.Rdata instance
        """
        raise NotImplementedError
    def choose_relativity(self, origin=None, relativize=True):
        """Convert any domain names in the rdata to the specified
        relativization.

        The base implementation does nothing; rdata types that contain
        names are expected to override it.
        """
        pass
class GenericRdata(Rdata):

    """Rdata of a type with no specific implementation.

    Falls back to the DNS "unknown RRs" scheme (RFC 3597): the rdata is
    held as raw bytes and rendered as ``\\# <length> <hex>``.
    """

    __slots__ = ['data']

    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        # Raw wire-format rdata bytes.
        self.data = data

    def to_text(self, origin=None, relativize=True, **kw):
        # Unknown-RR presentation: \# <length> <hex data>
        prefix = r'\# %d ' % len(self.data)
        return prefix + _hexify(self.data)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        leading = tok.get()
        if not leading.is_identifier() or leading.value != r'\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        declared_length = tok.get_int()
        hex_chunks = []
        while True:
            token = tok.get()
            if token.is_eol_or_eof():
                break
            hex_chunks.append(token.value.encode())
        data = binascii.unhexlify(b''.join(hex_chunks))
        if len(data) != declared_length:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, data)

    def to_wire(self, file, compress=None, origin=None):
        # The stored bytes are already wire form.
        file.write(self.data)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current:current + rdlen])
# Cache of already-imported implementation modules, keyed by (rdclass, rdtype).
_rdata_modules = {}
# Package under which per-type rdata implementation modules live.
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
    """Return the class implementing the given (rdclass, rdtype).

    Tries a class-specific module dns.rdtypes.<class>.<type>, then a
    class-independent dns.rdtypes.ANY.<type>; falls back to GenericRdata
    when neither exists.  Successful imports are cached in _rdata_modules.
    """

    def import_module(name):
        # __import__ returns the top-level package; walk down to the leaf.
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    # Type mnemonics may contain '-', which is not valid in module names.
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # Class-independent implementations are stored below under
        # dns.rdataclass.ANY; the lookup previously used dns.rdatatype.ANY,
        # which only worked because both constants happen to share a value.
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
    if not mod:
        try:
            mod = import_module('.'.join([_module_prefix,
                                          rdclass_text, rdtype_text]))
            _rdata_modules[(rdclass, rdtype)] = mod
        except ImportError:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              'ANY', rdtype_text]))
                _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
            except ImportError:
                mod = None
    if mod:
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text format.

    Dynamically loads the class implementing the given rdata class and
    type (GenericRdata if none exists) and delegates to its from_text().
    If I{tok} is a string, a tokenizer is created over it.  If a known
    type is written using the generic (\\#) syntax, the generic wire form
    is parsed first and then re-parsed with the specific implementation.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param tok: The tokenizer or input text
    @type tok: dns.tokenizer.Tokenizer or string
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @param relativize: Should names be relativized?
    @type relativize: bool
    @rtype: dns.rdata.Rdata instance"""
    if isinstance(tok, string_types):
        tok = dns.tokenizer.Tokenizer(tok)
    implementation = get_rdata_class(rdclass, rdtype)
    if implementation is not GenericRdata:
        # Peek at the first token to detect the generic (\#) syntax.
        token = tok.get()
        tok.unget(token)
        uses_generic_syntax = token.is_identifier() and token.value == r'\#'
        if uses_generic_syntax:
            # Known type written generically: extract the wire form and
            # then run the specific implementation's from_wire on it.
            generic = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                             relativize)
            return from_wire(rdclass, rdtype, generic.data, 0,
                             len(generic.data), origin)
    return implementation.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format.

    Dynamically loads the class implementing the given rdata class and
    type (GenericRdata if none exists) and delegates to its from_wire().

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param rdlen: The length of the wire-format rdata
    @type rdlen: int
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @rtype: dns.rdata.Rdata instance"""
    wrapped = dns.wiredata.maybe_wrap(wire)
    implementation = get_rdata_class(rdclass, rdtype)
    return implementation.from_wire(rdclass, rdtype, wrapped, current,
                                    rdlen, origin)
|
safe_sort_dicom_image_list
|
Sorts a list of DICOM image files based on a DICOM tag value.
This is a much safer method than reading SliceLocation.
It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
The list of DICOM files is sorted by projecting the image position onto the axis normal to the
plane defined by the image orientation.
This accounts for differences in patient position (e.g. HFS/FFS).
Args:
dicom_image_list (list): [description]
|
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
    """Recursively yield atomic items from an arbitrarily nested iterable.

    Strings, bytes, and SimpleITK images are treated as atomic values;
    anything non-iterable encountered during recursion is yielded as-is.
    """
    if type(itr) in (str, bytes, sitk.Image):
        yield itr
        return
    for element in itr:
        try:
            yield from flatten(element)
        except TypeError:
            # element was not iterable: it is a leaf value.
            yield element
def get_suv_bw_scale_factor(ds):
    """
    Return the body-weight SUV scale factor for a PET DICOM dataset.

    For Philips data stored in counts (Units == "CNTS") the private scale
    factor tag (0x7053, 0x1000) is returned directly.  Otherwise the dataset
    must be a decay- and attenuation-corrected PT series in BQML, and the
    factor is derived from injected dose, radionuclide half-life, decay time
    and patient weight.

    Args:
        ds (pydicom.Dataset): PET image header.

    Returns:
        float: multiplicative factor converting stored values to SUVbw.
    """
    # Modified from
    # https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf
    if ds.Units == "CNTS":
        # Try to find the Philips private scale factor
        return float(ds[0x7053, 0x1000].value)
    assert ds.Modality == "PT"
    assert "DECY" in ds.CorrectedImage
    assert "ATTN" in ds.CorrectedImage
    assert "START" in ds.DecayCorrection
    assert ds.Units == "BQML"
    half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
    if "SeriesTime" in ds:
        # Combine SeriesDate + SeriesTime, trimming any fractional seconds.
        series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
        if "." in series_date_time:
            series_date_time = series_date_time[
                : -(len(series_date_time) - series_date_time.index("."))
            ]
        series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")
    # NOTE(review): this guard repeats the "SeriesTime" test above; it
    # presumably should check for RadiopharmaceuticalStartTime instead —
    # confirm against the QIBA pseudocode referenced above.
    if "SeriesTime" in ds:
        start_time = (
            ds.SeriesDate
            + "_"
            + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
        )
        if "." in start_time:
            start_time = start_time[: -(len(start_time) - start_time.index("."))]
        start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")
    # NOTE(review): if "SeriesTime" is absent, series_date_time/start_time are
    # never bound and the subtraction below raises NameError.
    decay_time = (series_date_time - start_time).seconds
    injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
    decayed_dose = injected_dose * pow(2, -decay_time / half_life)
    patient_weight = float(ds.PatientWeight)
    suv_bw_scale_factor = patient_weight * 1000 / decayed_dose
    return suv_bw_scale_factor
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
    """
    Attempts to return some information from a DICOM.
    This is typically used for naming converted NIFTI files.

    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): fallback SOP class name used when the
            object has no SOPClassUID. Defaults to "UNKNOWN".

    Returns:
        info (str): Some extracted information (None for non-image SOP classes
        and unhandled image modalities).
    """
    try:
        dicom_sop_class_name = dicom_object.SOPClassUID.name
    except AttributeError:
        logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
        dicom_sop_class_name = sop_class_name

    if "Image" in dicom_sop_class_name:
        # Get the modality
        image_modality = dicom_object.Modality
        logger.info(f"  Image modality: {image_modality}")

        if image_modality == "CT":
            # There is typically not much extra information
            # At the moment, we do not return anything for CT imaging
            if return_extra:
                try:
                    protocol_name = dicom_object.ProtocolName
                    if protocol_name != "":
                        return re.sub(r"[^\w]", "_", protocol_name).upper()
                except AttributeError:
                    logger.warning("  Could not find ProtocolName")
            return ""

        elif image_modality == "MR":
            # Not much consistency, but we can get the protocol name
            try:
                protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
            except AttributeError:
                logger.warning("  Could not find ProtocolName")
                protocol_name = ""
            try:
                sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
            except AttributeError:
                logger.warning("  Could not find SequenceName")
                sequence_name = ""
            try:
                series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
            except AttributeError:
                # Fixed: this warning previously (incorrectly) said "SequenceName".
                logger.warning("  Could not find SeriesDescription")
                series_description = ""

            combined_name = "_".join([protocol_name, sequence_name, series_description])
            # Collapse runs of underscores left by empty components.
            while "__" in combined_name:
                combined_name = combined_name.replace("__", "_")

            if protocol_name != "" and not return_extra:
                return protocol_name
            else:
                return combined_name

        elif image_modality == "PT":
            # Not much experience with this
            # We can search through the corrections applied
            # Return whether or not attentuation is applied
            try:
                corrections = dicom_object.CorrectedImage
            except AttributeError:
                corrections = "NONE"
            if "ATTN" in corrections:
                return "AC"
            else:
                return "NAC"
# MASKED: safe_sort_dicom_image_list function (lines 168-196)
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation.

    The contour data is a flat sequence of (x, y, z) triplets; a missing
    value appears as an empty string.  A single missing coordinate is
    interpolated from its neighbours (x/y) or taken from the slice
    minimum (z); multiple missing values are not handled.

    Args:
        contour_data_list (list): The contour data for each slice

    Returns:
        contour_data (numpy array): Interpolated contour data
    """
    contour_data = np.array(contour_data_list)
    # Fixed: the original test was `contour_data.any() == ""`, which compares
    # a truthiness result to a string and can never be True. We want to know
    # whether ANY element equals the empty string.
    if np.any(contour_data == ""):
        logger.warning("  Missing values detected.")
        missing_values = np.where(contour_data == "")[0]
        if missing_values.shape[0] > 1:
            logger.warning("  More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning("  Only one value missing.")
            missing_index = missing_values[0]
            # Position within the (x, y, z) triplet.
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning("  Missing value in x axis: interpolating.")
                # Wrap around at both ends of the (closed) contour.
                if missing_index > len(contour_data) - 3:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (lower_val + upper_val)
            elif missing_axis == 1:
                logger.warning("  Missing value in y axis: interpolating.")
                if missing_index > len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (lower_val + upper_val)
            else:
                logger.warning("  Missing value in z axis: taking slice value")
                # All z values on a closed planar contour share the slice
                # location; use the minimum of the remaining values.
                temp = contour_data[2::3].tolist()
                temp.remove("")
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
    return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
    """
    This function is used to generate a binary mask from a set of vertices.
    This allows us to convert from DICOM-RTStruct format to any imaging format.
    Args:
        image ([SimpleITK.Image]): The image, used to copy imaging information
            (e.g. resolution, spacing)
        dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
        spacing_override (bool | tuple, optional): Overwrite the spacing.
            Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.
    Returns:
        list, list : final_struct_name_sequence, structure_list
    """
    if spacing_override:
        # Axes whose override value is 0 keep the image's current spacing.
        current_spacing = list(image.GetSpacing())
        new_spacing = tuple(
            [
                current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
                for k in range(3)
            ]
        )
        image.SetSpacing(new_spacing)
    struct_point_sequence = dicom_struct.ROIContourSequence
    # ROI names with internal whitespace collapsed to underscores.
    struct_name_sequence = [
        "_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
    ]
    structure_list = []
    final_struct_name_sequence = []
    for structIndex, structure_name in enumerate(struct_name_sequence):
        # Blank volume on the reference image grid; note numpy axis order is
        # (z, y, x), the reverse of SimpleITK's GetSize().
        image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
        logger.info(
            "  Converting structure {0} with name: {1}".format(structIndex, structure_name)
        )
        # Skip ROIs without usable closed-planar contour data.
        if structIndex >= len(struct_point_sequence):
            logger.warning("  Contour sequence is missing, skipping.")
            continue
        if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
            logger.warning("  No contour sequence found for this structure, skipping.")
            continue
        if len(struct_point_sequence[structIndex].ContourSequence) == 0:
            logger.warning("  Contour sequence is empty, skipping.")
            continue
        if (
            not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
            == "CLOSED_PLANAR"
        ):
            logger.warning("  This is not a closed planar structure, skipping.")
            continue
        for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
            contour_data = fix_missing_data(
                struct_point_sequence[structIndex].ContourSequence[sl].ContourData
            )
            struct_slice_contour_data = np.array(contour_data, dtype=np.double)
            # Reshape the flat (x, y, z, x, y, z, ...) list into (N, 3) vertices.
            vertexArr_physical = struct_slice_contour_data.reshape(
                struct_slice_contour_data.shape[0] // 3, 3
            )
            # Convert physical-space vertices to voxel indices, transposed to
            # one row per axis.
            point_arr = np.array(
                [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
            ).T
            [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
            zIndex = point_arr[2][0]
            # A CLOSED_PLANAR contour must lie entirely on one axial slice.
            if np.any(point_arr[2] != zIndex):
                logger.error("  Axial slice index varies in contour. Quitting now.")
                logger.error("  Structure: {0}".format(structure_name))
                logger.error("  Slice index: {0}".format(zIndex))
                quit()
            if zIndex >= image.GetSize()[2]:
                logger.warning("  Slice index greater than image size. Skipping slice.")
                logger.warning("  Structure: {0}".format(structure_name))
                logger.warning("  Slice index: {0}".format(zIndex))
                continue
            # Rasterise the polygon onto this slice and accumulate.
            sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
            filledIndicesX, filledIndicesY = polygon(
                xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
            )
            sliceArr[filledIndicesX, filledIndicesY] = 1
            image_blank[zIndex] += sliceArr.T
        # Binarise (a slice may have been incremented more than once) and copy
        # the reference image geometry onto the new mask.
        struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
        struct_image.CopyInformation(image)
        structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
        structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
        while "__" in structure_name_clean:
            structure_name_clean = structure_name_clean.replace("__", "_")
        final_struct_name_sequence.append(structure_name_clean)
    return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
    """
    Organise DICOM files into a nested dict keyed first by the parent
    sorting field (e.g. PatientName), then by series instance UID.
    """
    dicom_series_dict_parent = {}
    for file_index, dicom_path in enumerate(sorted(dicom_file_list)):
        if verbose is True:
            logger.debug(f"  Sorting file {file_index}")
        file_name = dicom_path.as_posix()
        # DICOMDIR index files are not parsed; images are read directly.
        if "dicomdir" in file_name.lower():
            logger.warning(
                "DICOMDIR is not supported in this tool, images are read directly. Skipping."
            )
            continue
        header = pydicom.read_file(file_name, force=True)
        parent_key = header[parent_sorting_field].value
        series_dict = dicom_series_dict_parent.setdefault(parent_key, {})
        series_dict.setdefault(header.SeriesInstanceUID, []).append(file_name)
    return dicom_series_dict_parent
def process_dicom_series(
    dicom_series_dict,
    series_uid,
    parent_sorting_field="PatientName",
    return_extra=True,
    individual_file=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Process a single DICOM series (or an individual file) into SimpleITK data.

    This is a generator.  Depending on the SOP class it yields tuples of one
    of the forms:
        ("IMAGES", metadata_dict, dicom_header, sitk_image)
        ("STRUCTURES", metadata_dict, dicom_header, structure_image_list)
        ("DOSES", metadata_dict, dicom_header, scaled_dose_image)

    Args:
        dicom_series_dict (dict): series UID -> list of DICOM file paths
        series_uid (str): the series instance UID to process
        parent_sorting_field (str, optional): DICOM field for top-level sorting
        return_extra (bool, optional): forwarded on recursive per-file calls
        individual_file (bool | str, optional): if set, process only this file
        initial_sop_class_name_default (str, optional): fallback SOP class name
    """
    if not individual_file:
        logger.info(f"  Processing series UID: {series_uid}")
        dicom_file_list = dicom_series_dict[series_uid]
    else:
        logger.info(f"  Processing individual file: {individual_file}")
        dicom_file_list = [individual_file]
    logger.info(f"  Number of DICOM files: {len(dicom_file_list)}")

    initial_dicom = pydicom.read_file(dicom_file_list[0])

    # Get the data in the parent sorting field, clean with RegEx
    parent_sorting_data = re.sub(
        r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
    ).upper()

    if parent_sorting_data == "":
        logger.error(
            f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
        )
        """
        ! TO DO
        Implement a routine to let a user correlate a root directory with a name
        """
        parent_sorting_data = "TEMP"

    try:
        initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
    except AttributeError:
        logger.warning(
            f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
        )
        initial_dicom_sop_class_name = initial_sop_class_name_default

    try:
        study_uid = initial_dicom.StudyInstanceUID
    except AttributeError:
        study_uid = "00001"

    """
    ! TO DO
    Need to check for secondary capture image storage
    This can include JPEGs with written information on them
    This is typically not very useful
    We can dump it to file
    Or just save the DICOM file in the folder of interest
    Not a big problem, sort out another day
    """

    # Check the potential types of DICOM files
    if (
        "Image" in initial_dicom_sop_class_name
        and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
    ):
        # Load as a primary image
        sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
        try:
            image = sitk.ReadImage(sorted_file_list)
        except RuntimeError:
            logger.warning("  Could not read image into SimpleITK.")
            logger.info("  Processing files individually.")
            # Fixed: previously this used `return process_dicom_series(...)`
            # inside the loop; in a generator that discards the sub-generator
            # (and only considered the first file). Delegate with yield from
            # so every file's results actually reach the caller.
            for dicom_file in dicom_file_list:
                yield from process_dicom_series(
                    dicom_series_dict,
                    series_uid,
                    parent_sorting_field=parent_sorting_field,
                    return_extra=return_extra,
                    individual_file=dicom_file,
                    initial_sop_class_name_default=initial_sop_class_name_default,
                )
            return

        dicom_file_metadata = {
            "parent_sorting_data": parent_sorting_data,
            "study_uid": study_uid,
        }

        """
        ! TO DO - integrity check
        Read in all the files here, check the slice location and determine if any are missing
        """
        if initial_dicom.Modality == "PT":
            # scaling_factor = get_suv_bw_scale_factor(initial_dicom)
            # image *= scaling_factor

            # !TO DO
            # Work on PET SUV conversion
            None

        """
        ! CHECKPOINT
        Some DCE MRI sequences have the same series UID
        Here we check the sequence name, and split if necessary
        """
        if initial_dicom.Modality == "MR":
            try:
                sequence_names = np.unique(
                    [pydicom.read_file(x).SequenceName for x in dicom_file_list]
                )
                sequence_dict = {}
                for dcm_name in dicom_file_list:
                    dcm_obj = pydicom.read_file(dcm_name)
                    var = dcm_obj.SequenceName
                    if var not in sequence_dict.keys():
                        sequence_dict[var] = [dcm_name]
                    else:
                        sequence_dict[var].append(dcm_name)
            except AttributeError:
                try:
                    logger.warning(
                        "  MRI sequence name not found. The SeriesDescription will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.SeriesDescription
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)
                except AttributeError:
                    logger.warning(
                        "  MRI SeriesDescription not found. The AcquisitionComments will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.AcquisitionComments
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)

            if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
                # GE use the DICOM tag (0019, 10a2) [Raw data run number]
                # in Diffusion weighted MRI sequences
                # We need to separate this out to get the difference sequences
                if initial_dicom.SeriesDescription == "Diffusion Weighted":
                    # num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
                    # number_of_images / images_per_seq
                    num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value
                    sequence_names = np.unique(
                        [
                            f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
                            for x in dicom_file_list
                        ]
                    )
                    sequence_name_index_dict = {
                        name: index for index, name in enumerate(sequence_names)
                    }
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
                        var_to_index = sequence_name_index_dict[var]
                        if var_to_index not in sequence_dict.keys():
                            sequence_dict[var_to_index] = [dcm_name]
                        else:
                            sequence_dict[var_to_index].append(dcm_name)
                    sequence_names = sorted(sequence_dict.keys())

            # Fixed: np.alen was removed in NumPy >= 1.23; len() works for
            # both the np.unique array and the sorted key list.
            if len(sequence_names) > 1:
                logger.warning("  Two MR sequences were found under a single series UID.")
                logger.warning("  These will be split into separate images.")

                # Split up the DICOM file list by sequence name
                for sequence_name in sequence_names:
                    dicom_file_list_by_sequence = sequence_dict[sequence_name]

                    logger.info(sequence_name)
                    logger.info(len(dicom_file_list_by_sequence))

                    sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)
                    initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
                    image_by_sequence = sitk.ReadImage(sorted_file_list)

                    dicom_file_metadata_by_sequence = {
                        "parent_sorting_data": parent_sorting_data,
                        "study_uid": study_uid,
                    }

                    yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
                return  # Stop iteration

        yield "IMAGES", dicom_file_metadata, initial_dicom, image

    if "Structure" in initial_dicom_sop_class_name:
        # Load as an RT structure set
        # This should be done individually for each file
        logger.info(f"  Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)

            # We must also read in the corresponding DICOM image
            # This can be found by matching the references series UID to the series UID
            """
            ! TO DO
            What happens if there is an RT structure set with different referenced sequences?
            """

            # Get the "ReferencedFrameOfReferenceSequence", first item
            referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]

            # Get the "RTReferencedStudySequence", first item
            # This retrieves the study UID
            # This might be useful, but would typically match the actual StudyInstanceUID in the
            # DICOM object
            rt_referenced_series_item = (
                referenced_frame_of_reference_item.RTReferencedStudySequence[0]
            )

            # Get the "RTReferencedSeriesSequence", first item
            # This retreives the actual referenced series UID, which we need to match imaging
            # parameters
            rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
                0
            ]

            # Get the appropriate series instance UID
            image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
            logger.info(f"  Item {index}: Matched SeriesInstanceUID = {image_series_uid}")

            # Read in the corresponding image
            sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
            image = sitk.ReadImage(sorted_file_list)

            initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)

            (
                structure_name_list,
                structure_image_list,
            ) = transform_point_set_from_dicom_struct(image, dicom_object)

            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
                "structure_name_list": structure_name_list,
            }

            yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list

    if "Dose" in initial_dicom_sop_class_name:
        # Load as an RT Dose distribution
        # This should be done individually for each file
        logger.info(f"  Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)

            """
            ! CHECKPOINT
            There should only be a single RT dose file (with each series UID)
            If there are more, yield each
            """

            initial_dicom = pydicom.read_file(dicom_file, force=True)

            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
            }

            # We must read in as a float otherwise when we multiply by one later it will not work!
            raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)

            dose_grid_scaling = dicom_object.DoseGridScaling
            logger.debug(f"  Dose grid scaling: {dose_grid_scaling} Gy")

            scaled_dose_image = raw_dose_image * dose_grid_scaling

            yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image

    """
    ! TO DO
    1. (DONE) Implement conversion of dose files (to NIFTI images)
    2. Implement conversion of RT plan files to text dump
    3. Do something with other files (e.g. Deformable Image Registration stuff)
    """

    return
def _clean_output_filename(filename):
    # Collapse repeated underscores and strip trailing ones; guards against
    # an empty name (original code raised IndexError on "").
    while "__" in filename:
        filename = filename.replace("__", "_")
    while filename and filename[-1] == "_":
        filename = filename[:-1]
    return filename


def _write_image_if_allowed(file_to_write, output_name, overwrite_existing_files):
    # Write one image to output_name, honouring the overwrite flag.
    if output_name.is_file():
        logger.warning(f"  File exists: {output_name}")
        if overwrite_existing_files:
            logger.warning("  You have selected to overwrite existing files.")
        else:
            logger.info(
                "  You have selected to NOT overwrite existing files. Continuing."
            )
            return
    sitk.WriteImage(file_to_write, output_name.as_posix())


def write_output_data_to_disk(
    output_data_dict,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
):
    """
    Write converted output data to disk.

    Args:
        output_data_dict (dict | None): mapping of field name -> {filename
            stem -> image or (possibly nested) list of images}, plus the
            special key "parent_sorting_data" used for the directory name.
        output_directory (str, optional): root output directory.
        output_file_suffix (str, optional): file suffix (e.g. ".nii.gz").
        overwrite_existing_files (bool, optional): overwrite existing files.

    Returns:
        dict | None: field name -> list of pathlib.Path targets (including
        files skipped because they already existed), or None if the input
        was None.
    """
    if output_data_dict is None:
        return

    filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
    parent_sorting_data = output_data_dict["parent_sorting_data"]

    files_written = {}

    """
    Write the the converted images to disk
    ! CONSIDER
    We could simply write as we go?
    Pro: save memory, important if processing very large files
    Con: Reading as we go allows proper indexing
    """
    for field in filename_fields:
        logger.info(f"  Writing files for field: {field}")
        p = pathlib.Path(output_directory) / parent_sorting_data / field
        p.mkdir(parents=True, exist_ok=True)
        files_written[field] = []

        for field_filename_base, field_list in output_data_dict[field].items():
            # Check if there is a list of images with matching names.
            # If so, we append an index to each filename as we write to disk.
            if isinstance(field_list, (tuple, list)):
                field_list_flat = list(flatten(field_list))
                for suffix, file_to_write in enumerate(field_list_flat):
                    field_filename = _clean_output_filename(
                        field_filename_base + f"_{suffix}"
                    )
                    output_name = (
                        pathlib.Path(output_directory)
                        / parent_sorting_data
                        / field
                        / (field_filename + output_file_suffix)
                    )
                    files_written[field].append(output_name)
                    _write_image_if_allowed(
                        file_to_write, output_name, overwrite_existing_files
                    )
            else:
                field_filename = _clean_output_filename(field_filename_base)
                output_name = (
                    pathlib.Path(output_directory)
                    / parent_sorting_data
                    / field
                    / (field_filename + output_file_suffix)
                )
                files_written[field].append(output_name)
                _write_image_if_allowed(
                    field_list, output_name, overwrite_existing_files
                )

    return files_written
def process_dicom_directory(
    dicom_directory,
    parent_sorting_field="PatientName",
    output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
    output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
    output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
    return_extra=True,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
    write_to_disk=True,
    verbose=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Convert a directory (or directories) of DICOM files into SimpleITK volumes,
    sorted by a parent field (e.g. PatientName) and series UID, optionally
    writing the results to disk.

    Args:
        dicom_directory (str | pathlib.Path | iterable): A directory, or an
            iterable of directories, searched recursively for files ending in
            .dcm/.dc3 (case-insensitive).
        parent_sorting_field (str, optional): DICOM header field used as the
            top-level sorting key. Defaults to "PatientName".
        output_image_name_format (str, optional): Format string for image file
            names. Fields outside the special set (parent_sorting_data,
            study_uid_index, image_desc, structure_name) are read from the
            DICOM header; missing header fields are substituted with 0.
        output_structure_name_format (str, optional): Format string for
            structure file names.
        output_dose_name_format (str, optional): Format string for dose file
            names.
        return_extra (bool, optional): Forwarded to description parsing for
            extra naming information. Defaults to True.
        output_directory (str, optional): Root directory for written files.
        output_file_suffix (str, optional): Output suffix, e.g. ".nii.gz".
        overwrite_existing_files (bool, optional): Overwrite files on name
            collision. Defaults to False.
        write_to_disk (bool, optional): If False, return the in-memory data
            dictionaries instead of writing files. Defaults to True.
        verbose (bool, optional): Extra per-file logging while sorting.
        initial_sop_class_name_default (str, optional): Fallback SOP class
            name when the SOP Class UID cannot be read.

    Returns:
        dict | None: Maps str(parent value) -> files written (when
            write_to_disk) or the in-memory output data dict; None/implicit
            None when no (valid) DICOM files are found.
    """
    # Check dicom_directory type
    if isinstance(dicom_directory, str) or isinstance(dicom_directory, pathlib.Path):
        # Get all the DICOM files in the given directory
        root_path = pathlib.Path(dicom_directory)
        # Find files ending with .dcm, .dc3
        dicom_file_list = [
            p
            for p in root_path.glob("**/*")
            if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
        ]
    elif hasattr(dicom_directory, "__iter__"):
        dicom_file_list = []
        for dicom_dir in dicom_directory:
            # Get all the DICOM files in each directory
            root_path = pathlib.Path(dicom_dir)
            # Find files ending with .dcm, .dc3
            dicom_file_list += [
                p
                for p in root_path.glob("**/*")
                if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
            ]
    # NOTE(review): if dicom_directory is neither path-like nor iterable,
    # dicom_file_list is unbound and the len() below raises NameError —
    # consider raising a TypeError explicitly.
    if len(dicom_file_list) == 0:
        logger.info("No DICOM files found in input directory. Exiting now.")
        return
    # Process the DICOM files
    # This returns a dictionary (of dictionaries):
    # {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
    #                                  {series_UID_2: [list_of_DICOM_files], ...
    #  parent_data_2                 : {series_UID_1: [list_of_DICOM_files],
    #                                  {series_UID_2: [list_of_DICOM_files], ...
    #  ... }
    dicom_series_dict_parent = process_dicom_file_list(
        dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
    )
    if dicom_series_dict_parent is None:
        logger.info("No valid DICOM files found. Ending.")
        return None
    output = {}
    for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
        logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
        logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")
        # Set up the output data
        # This stores the SimpleITK images and file names
        output_data_dict = {}
        # Set up the study UID dict
        # This helps match structure sets to relevant images
        # And paired images to each other (e.g. PET/CT)
        study_uid_dict = {}
        # Give some user feedback
        logger.debug(f" Output image name format: {output_image_name_format}")
        logger.debug(f" Output structure name format: {output_structure_name_format}")
        logger.debug(f" Output dose name format: {output_dose_name_format}")
        # For each unique series UID, process the DICOM files
        for series_uid in dicom_series_dict.keys():
            # This function returns four values
            # 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc
            # 2. dicom_file_metadata: Some special metadata extracted from the DICOM header
            # 3. initial_dicom: The first DICOM in the series. For doses and structures there is
            #    (usually) only one DICOM anyway
            # 4. dicom_file_data: The actual SimpleITK image data
            for (
                dicom_type,
                dicom_file_metadata,
                initial_dicom,
                dicom_file_data,
            ) in process_dicom_series(
                dicom_series_dict=dicom_series_dict,
                series_uid=series_uid,
                parent_sorting_field=parent_sorting_field,
                return_extra=return_extra,
                initial_sop_class_name_default=initial_sop_class_name_default,
            ):
                # Step 1
                # Check the parent sorting field is consistent
                # This would usually be the PatientName, PatientID, or similar
                # Occasionally these will both be blank
                parent_sorting_data = dicom_file_metadata["parent_sorting_data"]
                if "parent_sorting_data" not in output_data_dict.keys():
                    output_data_dict["parent_sorting_data"] = parent_sorting_data
                else:
                    if parent_sorting_data != output_data_dict["parent_sorting_data"]:
                        logger.error(
                            f"A conflict was found for the parent sorting field "
                            f"({parent_sorting_field}): {parent_sorting_data}"
                        )
                        logger.error("Quitting now.")
                        # NOTE(review): the bare print() + sys.exit() look like
                        # debugging leftovers; consider raising an exception.
                        print(dicom_series_dict_parent.keys())
                        sys.exit()
                    else:
                        logger.info(
                            f" Parent sorting field ({parent_sorting_field}) match found: "
                            f"{parent_sorting_data}"
                        )
                # Step 2
                # Get the study UID
                # Used for indexing DICOM series
                study_uid = dicom_file_metadata["study_uid"]
                if study_uid not in study_uid_dict.keys():
                    try:
                        study_uid_index = max(study_uid_dict.values()) + 1
                    except AttributeError:
                        study_uid_index = 0  # Study UID dict might not exist
                    except ValueError:
                        study_uid_index = 0  # Study UID dict might be empty
                    logger.info(f" Setting study instance UID index: {study_uid_index}")
                    study_uid_dict[study_uid] = study_uid_index
                else:
                    logger.info(
                        f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
                    )
                # Step 3
                # Generate names for output files
                # Special names
                # ! This can be defined once at the start of the function
                special_name_fields = [
                    "parent_sorting_data",
                    "study_uid_index",
                    "image_desc",
                    "structure_name",
                ]
                # Get the image description (other special names are already defined above)
                image_desc = get_dicom_info_from_description(
                    initial_dicom, return_extra=return_extra
                )
                # Get all the fields from the user-given name format
                if dicom_type == "IMAGES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_image_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "STRUCTURES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_structure_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "DOSES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_dose_name_format.split("}")
                        if len(i) > 0
                    ]
                # Now exclude those that aren't derived from the DICOM header
                dicom_header_tags = [i for i in all_naming_fields if i not in special_name_fields]
                naming_info_dict = {}
                for dicom_field in dicom_header_tags:
                    try:
                        dicom_field_value = initial_dicom[dicom_field].value
                    except (AttributeError, KeyError):
                        logger.warning(
                            f" Could not find DICOM header {dicom_field}. Setting as 0 to "
                            f"preserve naming convention."
                        )
                        dicom_field_value = 0
                    naming_info_dict[dicom_field] = dicom_field_value
                if dicom_type == "IMAGES":
                    output_name = output_image_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        image_desc=image_desc,
                        **naming_info_dict,
                    )
                    if "IMAGES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["IMAGES"] = {output_name: dicom_file_data}
                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["IMAGES"].keys():
                            output_data_dict["IMAGES"][output_name] = dicom_file_data
                        else:
                            logger.info(" An image with this name exists, appending.")
                            # NOTE(review): wrapping when the stored value IS
                            # iterable looks inverted — the DOSES branch below
                            # instead wraps single sitk.Image values. Confirm
                            # whether this condition should be negated.
                            if hasattr(output_data_dict["IMAGES"][output_name], "__iter__"):
                                output_data_dict["IMAGES"][output_name] = list(
                                    [output_data_dict["IMAGES"][output_name]]
                                )
                            output_data_dict["IMAGES"][output_name].append(dicom_file_data)
                elif dicom_type == "STRUCTURES":
                    for structure_name, structure_image in zip(
                        dicom_file_metadata["structure_name_list"], dicom_file_data
                    ):
                        output_name = output_structure_name_format.format(
                            parent_sorting_data=parent_sorting_data,
                            study_uid_index=study_uid_dict[study_uid],
                            image_desc=image_desc,
                            structure_name=structure_name,
                            **naming_info_dict,
                        )
                        if "STRUCTURES" not in output_data_dict.keys():
                            # Make a new entry
                            output_data_dict["STRUCTURES"] = {output_name: structure_image}
                        else:
                            # First check if there is another structure of the same name
                            if output_name not in output_data_dict["STRUCTURES"].keys():
                                output_data_dict["STRUCTURES"][output_name] = structure_image
                            else:
                                logger.info(" A structure with this name exists, appending.")
                                # NOTE(review): same possibly-inverted
                                # iterability check as the IMAGES branch above.
                                if hasattr(
                                    output_data_dict["STRUCTURES"][output_name], "__iter__"
                                ):
                                    output_data_dict["STRUCTURES"][output_name] = list(
                                        [output_data_dict["STRUCTURES"][output_name]]
                                    )
                                output_data_dict["STRUCTURES"][output_name].append(structure_image)
                elif dicom_type == "DOSES":
                    output_name = output_dose_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        **naming_info_dict,
                    )
                    if "DOSES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["DOSES"] = {output_name: dicom_file_data}
                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["DOSES"].keys():
                            output_data_dict["DOSES"][output_name] = dicom_file_data
                        else:
                            logger.info(" An image with this name exists, appending.")
                            # Promote a single stored image to a list before appending.
                            if isinstance(output_data_dict["DOSES"][output_name], sitk.Image):
                                output_data_dict["DOSES"][output_name] = list(
                                    [output_data_dict["DOSES"][output_name]]
                                )
                            output_data_dict["DOSES"][output_name].append(dicom_file_data)
        if write_to_disk:
            output[str(parent_data)] = write_output_data_to_disk(
                output_data_dict=output_data_dict,
                output_directory=output_directory,
                output_file_suffix=output_file_suffix,
                overwrite_existing_files=overwrite_existing_files,
            )
        else:
            output[str(parent_data)] = output_data_dict
    """
    TO DO!
    Memory issue with output_data_dict
    Use in inner loop, reset output_data_dict
    """
    return output
def safe_sort_dicom_image_list(dicom_image_list):
    """
    Sorts a list of DICOM image files based on a DICOM tag value.
    This is a much safer method than reading SliceLocation.
    It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
    The list of DICOM files is sorted by projecting the image position onto the axis normal to the
    plane defined by the image orientation.
    This accounts for differences in patient position (e.g. HFS/FFS).

    Args:
        dicom_image_list (list): Paths of the DICOM image files to sort.

    Returns:
        list: The input paths sorted along the slice axis.
    """
    sorted_dict = {}
    for dicom_file in dicom_image_list:
        dcm = pydicom.read_file(dicom_file, force=True)
        image_position = np.array(dcm.ImagePositionPatient, dtype=float)
        image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
        # Slice normal = row direction x column direction
        image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])
        # BUGFIX: the projection onto the slice normal is the dot product.
        # The previous element-wise product followed by [2] used only the
        # z-component, which is correct solely for axis-aligned (purely
        # axial) orientations and mis-sorts oblique acquisitions.
        sorted_dict[dicom_file] = float(np.dot(image_position, image_plane_normal))
    sorter_safe = lambda dcm_file: sorted_dict[dcm_file]
    return sorted(dicom_image_list, key=sorter_safe)
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
    """Recursively yield the leaf items of an arbitrarily nested iterable.

    Strings, bytes, and SimpleITK images are treated as atomic values and
    yielded whole, even though strings/bytes are themselves iterable.
    """
    if type(itr) in (str, bytes, sitk.Image):
        yield itr
        return
    for element in itr:
        try:
            yield from flatten(element)
        except TypeError:
            # Non-iterable leaf: emit it directly.
            yield element
def get_suv_bw_scale_factor(ds):
    """
    Compute the body-weight SUV scale factor for a PET DICOM dataset.

    For Philips data stored in counts (Units == "CNTS"), the private scale
    factor tag (0x7053, 0x1000) is returned directly. Otherwise the dataset
    must be a decay- and attenuation-corrected PT series in BQML (enforced by
    the asserts below), and the factor is computed from the injected dose
    decayed to the series time, divided into the patient weight (in grams).

    Args:
        ds (pydicom.Dataset): The PET image dataset.

    Returns:
        float: Multiplicative factor converting stored pixel values to SUVbw.
    """
    # Modified from
    # https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf
    if ds.Units == "CNTS":
        # Philips private scale factor tag
        return float(ds[0x7053, 0x1000].value)
    # NOTE(review): asserts are stripped under `python -O`; these validations
    # would be safer as explicit raises.
    assert ds.Modality == "PT"
    assert "DECY" in ds.CorrectedImage
    assert "ATTN" in ds.CorrectedImage
    assert "START" in ds.DecayCorrection
    assert ds.Units == "BQML"
    half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
    if "SeriesTime" in ds:
        series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
        # Strip fractional seconds before parsing
        if "." in series_date_time:
            series_date_time = series_date_time[
                : -(len(series_date_time) - series_date_time.index("."))
            ]
        series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")
    # NOTE(review): this second guard repeats the "SeriesTime" check but
    # builds the radiopharmaceutical START time — looks like a copy-paste of
    # the condition above; confirm it should not test
    # RadiopharmaceuticalStartTime instead. Also note that if "SeriesTime"
    # is absent, both series_date_time and start_time are unbound and the
    # subtraction below raises NameError.
    if "SeriesTime" in ds:
        start_time = (
            ds.SeriesDate
            + "_"
            + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
        )
        if "." in start_time:
            start_time = start_time[: -(len(start_time) - start_time.index("."))]
        start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")
    # NOTE(review): .seconds (not .total_seconds()) assumes the scan follows
    # injection on the same calendar day — TODO confirm.
    decay_time = (series_date_time - start_time).seconds
    injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
    decayed_dose = injected_dose * pow(2, -decay_time / half_life)
    patient_weight = float(ds.PatientWeight)
    # Weight in grams over decayed dose in Bq
    suv_bw_scale_factor = patient_weight * 1000 / decayed_dose
    return suv_bw_scale_factor
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
    """
    Attempts to return some information from a DICOM
    This is typically used for naming converted NIFTI files

    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): Fallback used when the SOP Class UID
            cannot be read from the object. Defaults to "UNKNOWN".

    Returns:
        info (str): Some extracted information. Note: implicitly returns None
            when the SOP class is not an image type or the modality is not
            one of CT/MR/PT.
    """
    try:
        dicom_sop_class_name = dicom_object.SOPClassUID.name
    except AttributeError:
        logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
        dicom_sop_class_name = sop_class_name
    if "Image" in dicom_sop_class_name:
        # Get the modality
        image_modality = dicom_object.Modality
        logger.info(f" Image modality: {image_modality}")
        if image_modality == "CT":
            # There is typically not much extra information
            # At the moment, we do not return anything for CT imaging
            if return_extra:
                try:
                    protocol_name = dicom_object.ProtocolName
                    if protocol_name != "":
                        return re.sub(r"[^\w]", "_", protocol_name).upper()
                except AttributeError:
                    logger.warning(" Could not find ProtocolName")
            return ""
        elif image_modality == "MR":
            # Not much consistency, but we can get the protocol name
            try:
                protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
            except AttributeError:
                logger.warning(" Could not find ProtocolName")
                protocol_name = ""
            try:
                sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
            except AttributeError:
                logger.warning(" Could not find SequenceName")
                sequence_name = ""
            try:
                series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
            except AttributeError:
                # BUGFIX: previously logged "Could not find SequenceName"
                # (copy-paste error) — it is the SeriesDescription that is
                # missing here.
                logger.warning(" Could not find SeriesDescription")
                series_description = ""
            combined_name = "_".join([protocol_name, sequence_name, series_description])
            # Collapse repeated underscores left by empty components
            while "__" in combined_name:
                combined_name = combined_name.replace("__", "_")
            if protocol_name != "" and not return_extra:
                return protocol_name
            else:
                return combined_name
        elif image_modality == "PT":
            # Not much experience with this
            # We can search through the corrections applied
            # Return whether or not attentuation is applied
            try:
                corrections = dicom_object.CorrectedImage
            except AttributeError:
                corrections = "NONE"
            if "ATTN" in corrections:
                return "AC"
            else:
                return "NAC"
def safe_sort_dicom_image_list(dicom_image_list):
    """
    Sorts a list of DICOM image files based on a DICOM tag value.
    This is a much safer method than reading SliceLocation.
    It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
    The list of DICOM files is sorted by projecting the image position onto the axis normal to the
    plane defined by the image orientation.
    This accounts for differences in patient position (e.g. HFS/FFS).

    Args:
        dicom_image_list (list): Paths of the DICOM image files to sort.

    Returns:
        list: The input paths sorted along the slice axis.
    """
    sorted_dict = {}
    for dicom_file in dicom_image_list:
        dcm = pydicom.read_file(dicom_file, force=True)
        image_position = np.array(dcm.ImagePositionPatient, dtype=float)
        image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
        # Slice normal = row direction x column direction
        image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])
        # BUGFIX: the projection onto the slice normal is the dot product.
        # The previous element-wise product followed by [2] used only the
        # z-component, which is correct solely for axis-aligned (purely
        # axial) orientations and mis-sorts oblique acquisitions.
        sorted_dict[dicom_file] = float(np.dot(image_position, image_plane_normal))
    sorter_safe = lambda dcm_file: sorted_dict[dcm_file]
    return sorted(dicom_image_list, key=sorter_safe)
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation

    A contour is stored as a flat [x0, y0, z0, x1, y1, z1, ...] list; an empty
    string marks a missing coordinate. A single missing x or y value is
    replaced by the mean of its neighbours on adjacent points (wrapping around
    at the contour ends); a missing z takes the minimum z of the contour.

    Args:
        contour_data_list (list): The contour data for each slice

    Returns:
        contour_data (numpy array): Interpolated contour data
    """
    contour_data = np.array(contour_data_list)
    # NOTE(review): `.any() == ""` compares the reduction result to an empty
    # string — its behaviour depends on the array dtype (numeric vs object);
    # `(contour_data == "").any()` is probably what is intended. Confirm.
    if contour_data.any() == "":
        logger.warning(" Missing values detected.")
        missing_values = np.where(contour_data == "")[0]
        if missing_values.shape[0] > 1:
            logger.warning(" More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning(" Only one value missing.")
            missing_index = missing_values[0]
            # Coordinates are interleaved x/y/z, so index mod 3 gives the axis
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning(" Missing value in x axis: interpolating.")
                # Wrap-around handling: last point pairs with the first
                if missing_index > len(contour_data) - 3:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (lower_val + upper_val)
            elif missing_axis == 1:
                logger.warning(" Missing value in y axis: interpolating.")
                # NOTE(review): the wrap-around offsets here ([1], [-2], [4])
                # do not mirror the x-axis case (which uses +/-3 and [0]/[3]);
                # [4] in particular looks like it should be contour_data[1+3]
                # only when missing_index == 1 — confirm these indices.
                if missing_index > len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (lower_val + upper_val)
            else:
                logger.warning(" Missing value in z axis: taking slice value")
                # Use the minimum of the remaining z values on this contour
                temp = contour_data[2::3].tolist()
                temp.remove("")
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
    return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
    """
    This function is used to generate a binary mask from a set of vertices.
    This allows us to convert from DICOM-RTStruct format to any imaging format.

    Args:
        image ([SimpleITK.Image]): The image, used to copy imaging information
            (e.g. resolution, spacing). NOTE: when spacing_override is given,
            this image is modified in place via SetSpacing.
        dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
        spacing_override (bool | tuple, optional): Overwrite the spacing.
            Set with (axial_spacing, coronal_spacing, sagittal spacing).
            A 0 entry keeps the image's existing spacing on that axis.
            Defaults to False.

    Returns:
        list, list : final_struct_name_sequence, structure_list
            Cleaned (regex-sanitised, upper-cased) structure names and the
            matching binary sitkUInt8 masks. Structures with missing/empty/
            non-CLOSED_PLANAR contour sequences are skipped.
    """
    if spacing_override:
        current_spacing = list(image.GetSpacing())
        # Keep existing spacing where the override entry is 0
        new_spacing = tuple(
            [
                current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
                for k in range(3)
            ]
        )
        image.SetSpacing(new_spacing)
    struct_point_sequence = dicom_struct.ROIContourSequence
    struct_name_sequence = [
        "_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
    ]
    structure_list = []
    final_struct_name_sequence = []
    for structIndex, structure_name in enumerate(struct_name_sequence):
        # Numpy array uses (z, y, x) ordering, hence the reversed sitk size
        image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
        logger.info(
            " Converting structure {0} with name: {1}".format(structIndex, structure_name)
        )
        if structIndex >= len(struct_point_sequence):
            logger.warning(" Contour sequence is missing, skipping.")
            continue
        if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
            logger.warning(" No contour sequence found for this structure, skipping.")
            continue
        if len(struct_point_sequence[structIndex].ContourSequence) == 0:
            logger.warning(" Contour sequence is empty, skipping.")
            continue
        if (
            not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
            == "CLOSED_PLANAR"
        ):
            logger.warning(" This is not a closed planar structure, skipping.")
            continue
        for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
            contour_data = fix_missing_data(
                struct_point_sequence[structIndex].ContourSequence[sl].ContourData
            )
            struct_slice_contour_data = np.array(contour_data, dtype=np.double)
            # Flat [x, y, z, x, y, z, ...] list -> (n_points, 3) vertex array
            vertexArr_physical = struct_slice_contour_data.reshape(
                struct_slice_contour_data.shape[0] // 3, 3
            )
            # Convert physical (patient) coordinates to voxel indices
            point_arr = np.array(
                [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
            ).T
            [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
            zIndex = point_arr[2][0]
            # A closed-planar contour must lie on a single axial slice
            if np.any(point_arr[2] != zIndex):
                logger.error(" Axial slice index varies in contour. Quitting now.")
                logger.error(" Structure: {0}".format(structure_name))
                logger.error(" Slice index: {0}".format(zIndex))
                quit()
            if zIndex >= image.GetSize()[2]:
                logger.warning(" Slice index greater than image size. Skipping slice.")
                logger.warning(" Structure: {0}".format(structure_name))
                logger.warning(" Slice index: {0}".format(zIndex))
                continue
            sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
            # Rasterise the polygon interior on this slice
            filledIndicesX, filledIndicesY = polygon(
                xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
            )
            sliceArr[filledIndicesX, filledIndicesY] = 1
            # Accumulate (+=) so overlapping contours on one slice stack up;
            # the >0 threshold below binarises the result. Transpose converts
            # (x, y) rasterisation into numpy's (y, x) layout.
            image_blank[zIndex] += sliceArr.T
        struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
        struct_image.CopyInformation(image)
        structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
        structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
        while "__" in structure_name_clean:
            structure_name_clean = structure_name_clean.replace("__", "_")
        final_struct_name_sequence.append(structure_name_clean)
    return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
    """
    Organise the DICOM files by the series UID

    Builds a nested dictionary keyed first by the value of the parent sorting
    field (e.g. PatientName) and then by SeriesInstanceUID, each mapping to the
    list of file paths in that series. DICOMDIR files are skipped.
    """
    dicom_series_dict_parent = {}
    for file_index, dicom_path in enumerate(sorted(dicom_file_list)):
        if verbose is True:
            logger.debug(f" Sorting file {file_index}")
        dicom_path = dicom_path.as_posix()
        if "dicomdir" in dicom_path.lower():
            logger.warning(
                "DICOMDIR is not supported in this tool, images are read directly. Skipping."
            )
            continue
        dicom_object = pydicom.read_file(dicom_path, force=True)
        sorting_value = dicom_object[parent_sorting_field].value
        series_uid = dicom_object.SeriesInstanceUID
        # Create the per-parent and per-series containers on first sight,
        # then append this file to its series list.
        series_dict = dicom_series_dict_parent.setdefault(sorting_value, {})
        series_dict.setdefault(series_uid, []).append(dicom_path)
    return dicom_series_dict_parent
def process_dicom_series(
    dicom_series_dict,
    series_uid,
    parent_sorting_field="PatientName",
    return_extra=True,
    individual_file=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Generator that converts one DICOM series into SimpleITK data.

    Yields tuples of:
        (dicom_type, dicom_file_metadata, initial_dicom, dicom_file_data)
    where dicom_type is "IMAGES", "STRUCTURES" or "DOSES",
    dicom_file_metadata carries the cleaned parent sorting value and study
    UID (plus structure names for structure sets), initial_dicom is the
    (first) pydicom dataset, and dicom_file_data is the SimpleITK image
    (or list of structure masks).

    Args:
        dicom_series_dict (dict): Mapping series UID -> list of file paths.
        series_uid (str): The series UID to process.
        parent_sorting_field (str, optional): Header field used for the
            top-level sorting value. Defaults to "PatientName".
        return_extra (bool, optional): Kept for interface symmetry with the
            callers; not used directly here. Defaults to True.
        individual_file (bool | str, optional): When set, process just this
            one file (used as a fallback when a whole-series read fails).
        initial_sop_class_name_default (str, optional): Fallback SOP class
            name when the SOP Class UID cannot be read.
    """
    if not individual_file:
        logger.info(f" Processing series UID: {series_uid}")
        dicom_file_list = dicom_series_dict[series_uid]
    else:
        logger.info(f" Processing individual file: {individual_file}")
        dicom_file_list = [individual_file]
    logger.info(f" Number of DICOM files: {len(dicom_file_list)}")
    initial_dicom = pydicom.read_file(dicom_file_list[0])
    # Get the data in the parent sorting field, clean with RegEx
    parent_sorting_data = re.sub(
        r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
    ).upper()
    if parent_sorting_data == "":
        logger.error(
            f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
        )
        """
        ! TO DO
        Implement a routine to let a user correlate a root directory with a name
        """
        parent_sorting_data = "TEMP"
    try:
        initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
    except AttributeError:
        logger.warning(
            f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
        )
        initial_dicom_sop_class_name = initial_sop_class_name_default
    try:
        study_uid = initial_dicom.StudyInstanceUID
    except AttributeError:
        study_uid = "00001"
    """
    ! TO DO
    Need to check for secondary capture image storage
    This can include JPEGs with written information on them
    This is typically not very useful
    We can dump it to file
    Or just save the DICOM file in the folder of interest
    Not a big problem, sort out another day
    """
    # Check the potential types of DICOM files
    if (
        "Image" in initial_dicom_sop_class_name
        and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
    ):
        # Load as an primary image
        sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
        try:
            image = sitk.ReadImage(sorted_file_list)
        except RuntimeError:
            logger.warning(" Could not read image into SimpleITK.")
            logger.info(" Processing files individually.")
            # BUGFIX: this previously did `return process_dicom_series(...)`
            # inside the loop. `return <value>` inside a generator only sets
            # the StopIteration value, so the recursive results were silently
            # discarded (and only the first file was ever attempted).
            # Delegate with `yield from` for EACH file, then stop.
            for dicom_file in dicom_file_list:
                yield from process_dicom_series(
                    dicom_series_dict,
                    series_uid,
                    parent_sorting_field=parent_sorting_field,
                    return_extra=return_extra,
                    individual_file=dicom_file,
                    initial_sop_class_name_default=initial_sop_class_name_default,
                )
            return
        dicom_file_metadata = {
            "parent_sorting_data": parent_sorting_data,
            "study_uid": study_uid,
        }
        """
        ! TO DO - integrity check
        Read in all the files here, check the slice location and determine if any are missing
        """
        if initial_dicom.Modality == "PT":
            # scaling_factor = get_suv_bw_scale_factor(initial_dicom)
            # image *= scaling_factor
            # !TO DO
            # Work on PET SUV conversion
            None
        """
        ! CHECKPOINT
        Some DCE MRI sequences have the same series UID
        Here we check the sequence name, and split if necessary
        """
        if initial_dicom.Modality == "MR":
            # Try SequenceName first, fall back to SeriesDescription, then
            # AcquisitionComments, to distinguish sequences sharing a UID.
            try:
                sequence_names = np.unique(
                    [pydicom.read_file(x).SequenceName for x in dicom_file_list]
                )
                sequence_dict = {}
                for dcm_name in dicom_file_list:
                    dcm_obj = pydicom.read_file(dcm_name)
                    var = dcm_obj.SequenceName
                    if var not in sequence_dict.keys():
                        sequence_dict[var] = [dcm_name]
                    else:
                        sequence_dict[var].append(dcm_name)
            except AttributeError:
                try:
                    logger.warning(
                        " MRI sequence name not found. The SeriesDescription will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.SeriesDescription
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)
                except AttributeError:
                    logger.warning(
                        " MRI SeriesDescription not found. The AcquisitionComments will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.AcquisitionComments
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)
            if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
                # GE use the DICOM tag (0019, 10a2) [Raw data run number]
                # in Diffusion weighted MRI sequences
                # We need to separate this out to get the difference sequences
                if initial_dicom.SeriesDescription == "Diffusion Weighted":
                    # num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
                    # number_of_images / images_per_seq
                    num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value
                    sequence_names = np.unique(
                        [
                            f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
                            for x in dicom_file_list
                        ]
                    )
                    sequence_name_index_dict = {
                        name: index for index, name in enumerate(sequence_names)
                    }
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
                        var_to_index = sequence_name_index_dict[var]
                        if var_to_index not in sequence_dict.keys():
                            sequence_dict[var_to_index] = [dcm_name]
                        else:
                            sequence_dict[var_to_index].append(dcm_name)
                    sequence_names = sorted(sequence_dict.keys())
            # BUGFIX: np.alen was deprecated and removed in NumPy >= 1.23;
            # the built-in len() works for both lists and numpy arrays here.
            if len(sequence_names) > 1:
                logger.warning(" Two MR sequences were found under a single series UID.")
                logger.warning(" These will be split into separate images.")
                # Split up the DICOM file list by sequence name
                for sequence_name in sequence_names:
                    dicom_file_list_by_sequence = sequence_dict[sequence_name]
                    logger.info(sequence_name)
                    logger.info(len(dicom_file_list_by_sequence))
                    sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)
                    initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
                    image_by_sequence = sitk.ReadImage(sorted_file_list)
                    dicom_file_metadata_by_sequence = {
                        "parent_sorting_data": parent_sorting_data,
                        "study_uid": study_uid,
                    }
                    yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
                return  # Stop iteration
        yield "IMAGES", dicom_file_metadata, initial_dicom, image
    if "Structure" in initial_dicom_sop_class_name:
        # Load as an RT structure set
        # This should be done individually for each file
        logger.info(f" Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)
            # We must also read in the corresponding DICOM image
            # This can be found by matching the references series UID to the series UID
            """
            ! TO DO
            What happens if there is an RT structure set with different referenced sequences?
            """
            # Get the "ReferencedFrameOfReferenceSequence", first item
            referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]
            # Get the "RTReferencedStudySequence", first item
            # This retrieves the study UID
            # This might be useful, but would typically match the actual StudyInstanceUID in the
            # DICOM object
            rt_referenced_series_item = (
                referenced_frame_of_reference_item.RTReferencedStudySequence[0]
            )
            # Get the "RTReferencedSeriesSequence", first item
            # This retreives the actual referenced series UID, which we need to match imaging
            # parameters
            rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
                0
            ]
            # Get the appropriate series instance UID
            image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
            logger.info(f" Item {index}: Matched SeriesInstanceUID = {image_series_uid}")
            # Read in the corresponding image
            sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
            image = sitk.ReadImage(sorted_file_list)
            initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
            (
                structure_name_list,
                structure_image_list,
            ) = transform_point_set_from_dicom_struct(image, dicom_object)
            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
                "structure_name_list": structure_name_list,
            }
            yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list
    if "Dose" in initial_dicom_sop_class_name:
        # Load as an RT Dose distribution
        # This should be done individually for each file
        logger.info(f" Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)
            """
            ! CHECKPOINT
            There should only be a single RT dose file (with each series UID)
            If there are more, yield each
            """
            initial_dicom = pydicom.read_file(dicom_file, force=True)
            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
            }
            # We must read in as a float otherwise when we multiply by one later it will not work!
            raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)
            dose_grid_scaling = dicom_object.DoseGridScaling
            logger.debug(f" Dose grid scaling: {dose_grid_scaling} Gy")
            scaled_dose_image = raw_dose_image * dose_grid_scaling
            yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image
    """
    ! TO DO
    1. (DONE) Implement conversion of dose files (to NIFTI images)
    2. Implement conversion of RT plan files to text dump
    3. Do something with other files (e.g. Deformable Image Registration stuff)
    """
    return
def _clean_output_filename(filename):
    """Collapse repeated underscores and strip any trailing underscore."""
    while "__" in filename:
        filename = filename.replace("__", "_")
    while filename[-1] == "_":
        filename = filename[:-1]
    return filename


def _write_single_image(file_to_write, output_name, overwrite_existing_files):
    """
    Write one SimpleITK image to output_name, honouring the overwrite flag.

    If the file already exists and overwriting is disabled the image is
    skipped; a message is logged either way.
    """
    if output_name.is_file():
        logger.warning(f" File exists: {output_name}")
        if overwrite_existing_files:
            logger.warning(" You have selected to overwrite existing files.")
        else:
            logger.info(
                " You have selected to NOT overwrite existing files. Continuing."
            )
            return
    sitk.WriteImage(file_to_write, output_name.as_posix())


def write_output_data_to_disk(
    output_data_dict,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
):
    """
    Write the converted imaging data to disk.

    Args:
        output_data_dict (dict): Maps each field ("IMAGES", "STRUCTURES",
            "DOSES", ...) to a dict of {file name base: SimpleITK image, or a
            (possibly nested) list of images}. Must also contain the key
            "parent_sorting_data" (e.g. the patient name), which becomes the
            top-level output sub-directory.
        output_directory (str, optional): Root output directory.
        output_file_suffix (str, optional): Output suffix, defines file type.
        overwrite_existing_files (bool, optional): Overwrite files on disk.

    Returns:
        dict | None: {field: [pathlib.Path, ...]} of the intended output
            files — including any that were skipped because they already
            existed (established return contract) — or None if no data was
            supplied.
    """
    if output_data_dict is None:
        return

    filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
    parent_sorting_data = output_data_dict["parent_sorting_data"]

    files_written = {}

    for field in filename_fields:
        logger.info(f" Writing files for field: {field}")
        p = pathlib.Path(output_directory) / parent_sorting_data / field
        p.mkdir(parents=True, exist_ok=True)

        files_written[field] = []

        for field_filename_base, field_list in output_data_dict[field].items():
            # If several images share a name (depends on the name format
            # chosen), flatten the collection and append an index to each
            # file name; otherwise write the single image under the base name.
            if isinstance(field_list, (tuple, list)):
                images_to_write = [
                    (field_filename_base + f"_{suffix}", file_to_write)
                    for suffix, file_to_write in enumerate(flatten(field_list))
                ]
            else:
                images_to_write = [(field_filename_base, field_list)]

            for field_filename, file_to_write in images_to_write:
                output_name = (
                    pathlib.Path(output_directory)
                    / parent_sorting_data
                    / field
                    / (_clean_output_filename(field_filename) + output_file_suffix)
                )
                # NOTE: the path is recorded even when writing is skipped
                # below, matching the established return contract.
                files_written[field].append(output_name)
                _write_single_image(file_to_write, output_name, overwrite_existing_files)

    return files_written
def process_dicom_directory(
    dicom_directory,
    parent_sorting_field="PatientName",
    output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
    output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
    output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
    return_extra=True,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
    write_to_disk=True,
    verbose=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Crawl a DICOM directory (or several), convert images, RT structure sets
    and RT dose grids to SimpleITK images, and optionally write them to disk.

    Args:
        dicom_directory (str | pathlib.Path | iterable): A directory, or an
            iterable of directories, searched recursively for .dcm/.dc3 files.
        parent_sorting_field (str, optional): DICOM keyword used for the
            top-level grouping (e.g. "PatientName", "PatientID").
        output_image_name_format (str, optional): Format string for image file
            names. Fields are either special (parent_sorting_data,
            study_uid_index, image_desc, structure_name) or DICOM keywords
            looked up on the first DICOM of the series.
        output_structure_name_format (str, optional): As above, for structures.
        output_dose_name_format (str, optional): As above, for dose grids.
        return_extra (bool, optional): Include extra description information
            when generating names.
        output_directory (str, optional): Root directory for written files.
        output_file_suffix (str, optional): Output suffix, defines file type.
        overwrite_existing_files (bool, optional): Overwrite files on disk.
        write_to_disk (bool, optional): If False, return the SimpleITK data
            instead of writing files.
        verbose (bool, optional): Verbose logging during file sorting.
        initial_sop_class_name_default (str, optional): Fallback SOP class
            name for files without a SOP Class UID.

    Returns:
        dict | None: {parent value: written file paths (or image data when
            write_to_disk is False)}, or None if no valid DICOM was found.
    """
    # Check dicom_directory type
    if isinstance(dicom_directory, str) or isinstance(dicom_directory, pathlib.Path):
        # Get all the DICOM files in the given directory
        root_path = pathlib.Path(dicom_directory)
        # Find files ending with .dcm, .dc3
        dicom_file_list = [
            p
            for p in root_path.glob("**/*")
            if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
        ]
    elif hasattr(dicom_directory, "__iter__"):
        dicom_file_list = []
        for dicom_dir in dicom_directory:
            # Get all the DICOM files in each directory
            root_path = pathlib.Path(dicom_dir)
            # Find files ending with .dcm, .dc3
            dicom_file_list += [
                p
                for p in root_path.glob("**/*")
                if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
            ]
    # NOTE(review): if dicom_directory is neither str/Path nor iterable,
    # dicom_file_list is unbound and the next line raises NameError — confirm
    # whether an explicit error is preferable.

    if len(dicom_file_list) == 0:
        logger.info("No DICOM files found in input directory. Exiting now.")
        return

    # Process the DICOM files
    # This returns a dictionary (of dictionaries):
    # {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
    #                                   {series_UID_2: [list_of_DICOM_files], ...
    #  parent_data_2                  : {series_UID_1: [list_of_DICOM_files],
    #                                   {series_UID_2: [list_of_DICOM_files], ...
    #  ... }
    dicom_series_dict_parent = process_dicom_file_list(
        dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
    )

    if dicom_series_dict_parent is None:
        logger.info("No valid DICOM files found. Ending.")
        return None

    output = {}

    for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
        logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
        logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")

        # Set up the output data
        # This stores the SimpleITK images and file names
        output_data_dict = {}

        # Set up the study UID dict
        # This helps match structure sets to relevant images
        # And paired images to each other (e.g. PET/CT)
        study_uid_dict = {}

        # Give some user feedback
        logger.debug(f" Output image name format: {output_image_name_format}")
        logger.debug(f" Output structure name format: {output_structure_name_format}")
        logger.debug(f" Output dose name format: {output_dose_name_format}")

        # For each unique series UID, process the DICOM files
        for series_uid in dicom_series_dict.keys():

            # This function returns four values
            # 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc
            # 2. dicom_file_metadata: Some special metadata extracted from the DICOM header
            # 3. initial_dicom: The first DICOM in the series. For doses and structures there is
            #    (usually) only one DICOM anyway
            # 4. dicom_file_data: The actual SimpleITK image data
            for (
                dicom_type,
                dicom_file_metadata,
                initial_dicom,
                dicom_file_data,
            ) in process_dicom_series(
                dicom_series_dict=dicom_series_dict,
                series_uid=series_uid,
                parent_sorting_field=parent_sorting_field,
                return_extra=return_extra,
                initial_sop_class_name_default=initial_sop_class_name_default,
            ):

                # Step 1
                # Check the parent sorting field is consistent
                # This would usually be the PatientName, PatientID, or similar
                # Occasionally these will both be blank
                parent_sorting_data = dicom_file_metadata["parent_sorting_data"]

                if "parent_sorting_data" not in output_data_dict.keys():
                    output_data_dict["parent_sorting_data"] = parent_sorting_data

                else:
                    if parent_sorting_data != output_data_dict["parent_sorting_data"]:
                        logger.error(
                            f"A conflict was found for the parent sorting field "
                            f"({parent_sorting_field}): {parent_sorting_data}"
                        )
                        logger.error("Quitting now.")
                        # NOTE(review): debugging print + hard process exit;
                        # consider raising an exception instead of sys.exit().
                        print(dicom_series_dict_parent.keys())
                        sys.exit()
                    else:
                        logger.info(
                            f" Parent sorting field ({parent_sorting_field}) match found: "
                            f"{parent_sorting_data}"
                        )

                # Step 2
                # Get the study UID
                # Used for indexing DICOM series
                study_uid = dicom_file_metadata["study_uid"]

                if study_uid not in study_uid_dict.keys():
                    try:
                        study_uid_index = max(study_uid_dict.values()) + 1
                    except AttributeError:
                        study_uid_index = 0  # Study UID dict might not exist
                    except ValueError:
                        study_uid_index = 0  # Study UID dict might be empty

                    logger.info(f" Setting study instance UID index: {study_uid_index}")

                    study_uid_dict[study_uid] = study_uid_index

                else:
                    logger.info(
                        f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
                    )

                # Step 3
                # Generate names for output files

                # Special names
                # ! This can be defined once at the start of the function
                special_name_fields = [
                    "parent_sorting_data",
                    "study_uid_index",
                    "image_desc",
                    "structure_name",
                ]

                # Get the image description (other special names are already defined above)
                image_desc = get_dicom_info_from_description(
                    initial_dicom, return_extra=return_extra
                )

                # Get all the fields from the user-given name format
                # (text between each "{" and "}" pair)
                if dicom_type == "IMAGES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_image_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "STRUCTURES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_structure_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "DOSES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_dose_name_format.split("}")
                        if len(i) > 0
                    ]

                # Now exclude those that aren't derived from the DICOM header
                dicom_header_tags = [i for i in all_naming_fields if i not in special_name_fields]

                naming_info_dict = {}
                for dicom_field in dicom_header_tags:
                    try:
                        dicom_field_value = initial_dicom[dicom_field].value
                    except (AttributeError, KeyError):
                        logger.warning(
                            f" Could not find DICOM header {dicom_field}. Setting as 0 to "
                            f"preserve naming convention."
                        )
                        dicom_field_value = 0
                    naming_info_dict[dicom_field] = dicom_field_value

                if dicom_type == "IMAGES":

                    output_name = output_image_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        image_desc=image_desc,
                        **naming_info_dict,
                    )

                    if "IMAGES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["IMAGES"] = {output_name: dicom_file_data}

                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["IMAGES"].keys():
                            output_data_dict["IMAGES"][output_name] = dicom_file_data

                        else:
                            logger.info(" An image with this name exists, appending.")
                            # NOTE(review): this check looks inverted relative to
                            # the DOSES branch below (which wraps the existing
                            # entry when it IS a single sitk.Image). A single
                            # sitk.Image has no __iter__, so this would call
                            # .append on it — verify duplicate-name handling.
                            if hasattr(output_data_dict["IMAGES"][output_name], "__iter__"):
                                output_data_dict["IMAGES"][output_name] = list(
                                    [output_data_dict["IMAGES"][output_name]]
                                )
                            output_data_dict["IMAGES"][output_name].append(dicom_file_data)

                elif dicom_type == "STRUCTURES":

                    for structure_name, structure_image in zip(
                        dicom_file_metadata["structure_name_list"], dicom_file_data
                    ):

                        output_name = output_structure_name_format.format(
                            parent_sorting_data=parent_sorting_data,
                            study_uid_index=study_uid_dict[study_uid],
                            image_desc=image_desc,
                            structure_name=structure_name,
                            **naming_info_dict,
                        )

                        if "STRUCTURES" not in output_data_dict.keys():
                            # Make a new entry
                            output_data_dict["STRUCTURES"] = {output_name: structure_image}

                        else:
                            # First check if there is another structure of the same name
                            if output_name not in output_data_dict["STRUCTURES"].keys():
                                output_data_dict["STRUCTURES"][output_name] = structure_image

                            else:
                                logger.info(" A structure with this name exists, appending.")
                                # NOTE(review): same caveat as the IMAGES branch above.
                                if hasattr(
                                    output_data_dict["STRUCTURES"][output_name], "__iter__"
                                ):
                                    output_data_dict["STRUCTURES"][output_name] = list(
                                        [output_data_dict["STRUCTURES"][output_name]]
                                    )
                                output_data_dict["STRUCTURES"][output_name].append(structure_image)

                elif dicom_type == "DOSES":

                    output_name = output_dose_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        **naming_info_dict,
                    )

                    if "DOSES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["DOSES"] = {output_name: dicom_file_data}

                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["DOSES"].keys():
                            output_data_dict["DOSES"][output_name] = dicom_file_data

                        else:
                            logger.info(" An image with this name exists, appending.")

                            if isinstance(output_data_dict["DOSES"][output_name], sitk.Image):
                                output_data_dict["DOSES"][output_name] = list(
                                    [output_data_dict["DOSES"][output_name]]
                                )
                            output_data_dict["DOSES"][output_name].append(dicom_file_data)

        # Write out (or collect) everything gathered for this parent value
        if write_to_disk:
            output[str(parent_data)] = write_output_data_to_disk(
                output_data_dict=output_data_dict,
                output_directory=output_directory,
                output_file_suffix=output_file_suffix,
                overwrite_existing_files=overwrite_existing_files,
            )
        else:
            output[str(parent_data)] = output_data_dict

    """
    TO DO!
    Memory issue with output_data_dict
    Use in inner loop, reset output_data_dict
    """

    return output
|
fix_missing_data
|
Fixes missing points in contouring using simple linear interpolation
Args:
contour_data_list (list): The contour data for each slice
Returns:
contour_data (numpy array): Interpolated contour data
|
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
    """
    Recursively flatten a nested iterable, yielding the leaf items.

    Strings, bytes and SimpleITK images are treated as atomic leaves (even
    though str/bytes are themselves iterable). Any non-iterable object
    encountered inside the structure is also yielded as-is.

    Args:
        itr: An (arbitrarily nested) iterable, or a single leaf object.

    Yields:
        The leaf objects, in depth-first order.
    """
    # isinstance (rather than an exact type() comparison) so that subclasses
    # of str/bytes/sitk.Image are also treated as leaves instead of being
    # iterated (which for a str subclass would recurse into its characters).
    if isinstance(itr, (str, bytes, sitk.Image)):
        yield itr
    else:
        for x in itr:
            try:
                yield from flatten(x)
            except TypeError:
                # x is not iterable — it is a leaf
                yield x
def get_suv_bw_scale_factor(ds):
    """
    Compute the body-weight SUV scale factor for a PET DICOM dataset.

    Multiplying the stored pixel values (BQML) by this factor gives SUVbw.
    Philips "CNTS" data instead uses the private scale factor tag directly.

    Args:
        ds (pydicom.Dataset): PET image dataset (Modality "PT").

    Returns:
        float: The SUVbw scale factor.
    """
    # Modified from
    # https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf

    if ds.Units == "CNTS":
        # Try to find the Philips private scale factor")
        return float(ds[0x7053, 0x1000].value)

    # Happy-path preconditions: decay-corrected (to series/start), attenuation
    # corrected, activity-concentration units.
    assert ds.Modality == "PT"
    assert "DECY" in ds.CorrectedImage
    assert "ATTN" in ds.CorrectedImage
    assert "START" in ds.DecayCorrection
    assert ds.Units == "BQML"

    half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)

    if "SeriesTime" in ds:
        # Strip fractional seconds before parsing
        series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
        if "." in series_date_time:
            series_date_time = series_date_time[
                : -(len(series_date_time) - series_date_time.index("."))
            ]
        series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")

    # NOTE(review): this guard repeats "SeriesTime" — presumably it should
    # test for RadiopharmaceuticalStartTime instead. If SeriesTime is absent,
    # neither block runs and the decay_time line below raises NameError
    # (happy-path-only behaviour) — confirm this is intended.
    if "SeriesTime" in ds:
        start_time = (
            ds.SeriesDate
            + "_"
            + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
        )

        if "." in start_time:
            start_time = start_time[: -(len(start_time) - start_time.index("."))]

        start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")

    # NOTE(review): .seconds (not total_seconds()) assumes acquisition on the
    # same calendar day as injection — confirm for overnight studies.
    decay_time = (series_date_time - start_time).seconds
    injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
    # Exponential decay over the uptake period
    decayed_dose = injected_dose * pow(2, -decay_time / half_life)
    patient_weight = float(ds.PatientWeight)
    # SUVbw factor: weight in grams divided by decay-corrected dose
    suv_bw_scale_factor = patient_weight * 1000 / decayed_dose

    return suv_bw_scale_factor
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
    """
    Attempts to return some information from a DICOM
    This is typically used for naming converted NIFTI files

    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): fallback SOP Class name used when the
            DICOM object does not define SOPClassUID.

    Returns:
        info (str): Some extracted information. Implicitly returns None for
            non-image SOP classes and for image modalities other than CT/MR/PT.
    """
    try:
        dicom_sop_class_name = dicom_object.SOPClassUID.name
    except AttributeError:
        logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
        dicom_sop_class_name = sop_class_name

    if "Image" in dicom_sop_class_name:
        # Get the modality
        image_modality = dicom_object.Modality
        logger.info(f" Image modality: {image_modality}")

        if image_modality == "CT":
            # There is typically not much extra information
            # At the moment, we do not return anything for CT imaging
            if return_extra:
                try:
                    protocol_name = dicom_object.ProtocolName
                    if protocol_name != "":
                        return re.sub(r"[^\w]", "_", protocol_name).upper()
                except AttributeError:
                    logger.warning(" Could not find ProtocolName")

            return ""

        elif image_modality == "MR":
            # Not much consistency, but we can get the protocol name
            try:
                protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
            except AttributeError:
                logger.warning(" Could not find ProtocolName")
                protocol_name = ""

            try:
                sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
            except AttributeError:
                logger.warning(" Could not find SequenceName")
                sequence_name = ""

            try:
                series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
            except AttributeError:
                # BUGFIX: this warning previously (incorrectly) said "SequenceName"
                logger.warning(" Could not find SeriesDescription")
                series_description = ""

            combined_name = "_".join([protocol_name, sequence_name, series_description])

            while "__" in combined_name:
                combined_name = combined_name.replace("__", "_")

            if protocol_name != "" and not return_extra:
                return protocol_name

            else:
                return combined_name

        elif image_modality == "PT":
            # Not much experience with this
            # We can search through the corrections applied
            # Return whether or not attentuation is applied
            try:
                corrections = dicom_object.CorrectedImage
            except AttributeError:
                corrections = "NONE"

            if "ATTN" in corrections:
                return "AC"
            else:
                return "NAC"
def safe_sort_dicom_image_list(dicom_image_list):
    """
    Sort a list of DICOM image files into spatial order.

    Rather than relying on the optional SliceLocation tag, this uses the
    mandatory Image Position (Patient) and Image Orientation (Patient) tags:
    the slice-plane normal is computed from the orientation direction
    cosines, and each file's position is reduced to a scalar location along
    that normal. This also accounts for differences in patient position
    (e.g. HFS/FFS).

    Args:
        dicom_image_list (list): Paths of the DICOM image files to sort.

    Returns:
        list: The same file paths, ordered by slice location.
    """
    location_by_file = {}
    for image_file in dicom_image_list:
        header = pydicom.read_file(image_file, force=True)
        position = np.asarray(header.ImagePositionPatient, dtype=float)
        orientation = np.asarray(header.ImageOrientationPatient, dtype=float)
        # Plane normal = cross product of the row and column direction cosines
        plane_normal = np.cross(orientation[:3], orientation[3:])
        location_by_file[image_file] = (position * plane_normal)[2]

    return sorted(dicom_image_list, key=location_by_file.__getitem__)
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation

    A DICOM ContourData element may occasionally contain empty entries. Each
    missing co-ordinate is replaced by the mean of the same co-ordinate
    (x, y or z) of the two adjacent contour vertices, i.e. the values found
    3 positions before and after (wrapping around, since the contour is a
    closed loop).

    Args:
        contour_data_list (list): The contour data for each slice

    Returns:
        contour_data (numpy array): Interpolated contour data
    """
    # Convert to floats, marking missing entries with NaN
    contour_data = np.array(
        [np.nan if v in ("", None) else float(v) for v in contour_data_list],
        dtype=np.double,
    )

    missing_indices = np.where(np.isnan(contour_data))[0]
    if missing_indices.shape[0] > 0:
        logger.warning(f" {missing_indices.shape[0]} missing value(s) detected in contour data.")
        num_values = contour_data.shape[0]
        for missing_index in missing_indices:
            # Same co-ordinate on the neighbouring vertices: each vertex is an
            # (x, y, z) triplet, hence the stride of 3. The modulo wraps
            # around the closed contour.
            previous_value = contour_data[(missing_index - 3) % num_values]
            next_value = contour_data[(missing_index + 3) % num_values]
            if np.isnan(previous_value) or np.isnan(next_value):
                # Adjacent point(s) also missing - cannot interpolate this one
                logger.warning(" Neighbouring values also missing, cannot interpolate.")
                continue
            contour_data[missing_index] = 0.5 * (previous_value + next_value)

    return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
    """
    This function is used to generate a binary mask from a set of vertices.
    This allows us to convert from DICOM-RTStruct format to any imaging format.

    Args:
        image ([SimpleITK.Image]): The image, used to copy imaging information
            (e.g. resolution, spacing)
        dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
        spacing_override (bool | tuple, optional): Overwrite the spacing.
            Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.

    Returns:
        list, list : final_struct_name_sequence, structure_list
    """
    if spacing_override:
        # Replace only the non-zero entries of the override tuple
        current_spacing = list(image.GetSpacing())
        new_spacing = tuple(
            [
                current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
                for k in range(3)
            ]
        )
        image.SetSpacing(new_spacing)

    struct_point_sequence = dicom_struct.ROIContourSequence
    # ROI names with internal whitespace joined by underscores
    struct_name_sequence = [
        "_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
    ]

    structure_list = []
    final_struct_name_sequence = []

    for structIndex, structure_name in enumerate(struct_name_sequence):
        # Empty mask in (z, y, x) voxel order, filled slice by slice below
        image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
        logger.info(
            " Converting structure {0} with name: {1}".format(structIndex, structure_name)
        )

        # Guard clauses: skip structures without usable closed-planar contours
        if structIndex >= len(struct_point_sequence):
            logger.warning(" Contour sequence is missing, skipping.")
            continue

        if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
            logger.warning(" No contour sequence found for this structure, skipping.")
            continue

        if len(struct_point_sequence[structIndex].ContourSequence) == 0:
            logger.warning(" Contour sequence is empty, skipping.")
            continue

        if (
            not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
            == "CLOSED_PLANAR"
        ):
            logger.warning(" This is not a closed planar structure, skipping.")
            continue

        for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
            # Repair any missing contour values before use
            contour_data = fix_missing_data(
                struct_point_sequence[structIndex].ContourSequence[sl].ContourData
            )

            struct_slice_contour_data = np.array(contour_data, dtype=np.double)
            # Flat (x, y, z, x, y, z, ...) list -> (N, 3) physical points
            vertexArr_physical = struct_slice_contour_data.reshape(
                struct_slice_contour_data.shape[0] // 3, 3
            )
            # Convert physical points to image voxel indices
            point_arr = np.array(
                [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
            ).T
            [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
            zIndex = point_arr[2][0]

            # A CLOSED_PLANAR contour must lie on a single axial slice
            if np.any(point_arr[2] != zIndex):
                logger.error(" Axial slice index varies in contour. Quitting now.")
                logger.error(" Structure: {0}".format(structure_name))
                logger.error(" Slice index: {0}".format(zIndex))
                # NOTE(review): quit() terminates the interpreter; a raised
                # exception would be kinder to library users — confirm intent.
                quit()

            if zIndex >= image.GetSize()[2]:
                logger.warning(" Slice index greater than image size. Skipping slice.")
                logger.warning(" Structure: {0}".format(structure_name))
                logger.warning(" Slice index: {0}".format(zIndex))
                continue

            # Rasterise the polygon on this slice and accumulate into the mask
            sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
            filledIndicesX, filledIndicesY = polygon(
                xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
            )
            sliceArr[filledIndicesX, filledIndicesY] = 1
            # Transpose: polygon works in (x, y), image_blank is (z, y, x)
            image_blank[zIndex] += sliceArr.T

        # Binarise (overlapping contours may have accumulated values > 1)
        struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
        struct_image.CopyInformation(image)
        structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
        # Clean the name for use as a file name
        structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
        while "__" in structure_name_clean:
            structure_name_clean = structure_name_clean.replace("__", "_")
        final_struct_name_sequence.append(structure_name_clean)

    return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
    """
    Organise the DICOM files by the series UID.

    Args:
        dicom_file_list (list): pathlib.Path objects of the DICOM files.
        parent_sorting_field (str, optional): DICOM keyword used for the
            top-level grouping (e.g. "PatientName", "PatientID").
        verbose (bool, optional): Log a debug message for every file.

    Returns:
        dict: {parent_sorting_value: {series_uid: [file path (posix str), ...]}}
    """
    dicom_series_dict_parent = {}

    for index, dicom_file in enumerate(sorted(dicom_file_list)):
        if verbose:
            logger.debug(f" Sorting file {index}")

        dicom_file = dicom_file.as_posix()

        # DICOMDIR index files cannot be converted; images are read directly
        if "dicomdir" in dicom_file.lower():
            logger.warning(
                "DICOMDIR is not supported in this tool, images are read directly. Skipping."
            )
            continue

        dicom_object = pydicom.read_file(dicom_file, force=True)

        parent_sorting_field_data = dicom_object[parent_sorting_field].value
        series_uid = dicom_object.SeriesInstanceUID

        # Group first by the parent field, then by series instance UID
        dicom_series_dict_parent.setdefault(parent_sorting_field_data, {}).setdefault(
            series_uid, []
        ).append(dicom_file)

    return dicom_series_dict_parent
def process_dicom_series(
    dicom_series_dict,
    series_uid,
    parent_sorting_field="PatientName",
    return_extra=True,
    individual_file=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Convert a single DICOM series into SimpleITK data.

    This is a generator. Depending on the SOP class of the series it yields
    zero or more tuples of:
        (dicom_type, dicom_file_metadata, initial_dicom, dicom_file_data)
    where dicom_type is "IMAGES", "STRUCTURES" or "DOSES",
    dicom_file_metadata is a dict of sorting metadata, initial_dicom is a
    pydicom dataset from the series, and dicom_file_data is the SimpleITK
    image (or, for structures, a list of binary masks).

    Args:
        dicom_series_dict (dict): {series_uid: [dicom_file, ...]} for one
            parent (e.g. one patient).
        series_uid (str): The series instance UID to process.
        parent_sorting_field (str, optional): DICOM keyword used for the
            top-level sorting (e.g. "PatientName").
        return_extra (bool, optional): Passed through on recursive calls;
            kept for interface compatibility with callers.
        individual_file (str | bool, optional): If given, process only this
            single file instead of the whole series.
        initial_sop_class_name_default (str, optional): Fallback SOP class
            name for files without a SOP Class UID.
    """
    if not individual_file:
        logger.info(f" Processing series UID: {series_uid}")
        dicom_file_list = dicom_series_dict[series_uid]
    else:
        logger.info(f" Processing individual file: {individual_file}")
        dicom_file_list = [individual_file]

    logger.info(f" Number of DICOM files: {len(dicom_file_list)}")

    # force=True for consistency with every other read site in this module
    initial_dicom = pydicom.read_file(dicom_file_list[0], force=True)

    # Get the data in the parent sorting field, clean with RegEx
    parent_sorting_data = re.sub(
        r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
    ).upper()

    if parent_sorting_data == "":
        logger.error(
            f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
        )
        # ! TO DO
        # Implement a routine to let a user correlate a root directory with a name
        parent_sorting_data = "TEMP"

    try:
        initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
    except AttributeError:
        logger.warning(
            f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
        )
        initial_dicom_sop_class_name = initial_sop_class_name_default

    try:
        study_uid = initial_dicom.StudyInstanceUID
    except AttributeError:
        study_uid = "00001"

    # ! TO DO
    # Need to check for secondary capture image storage
    # This can include JPEGs with written information on them
    # This is typically not very useful
    # We can dump it to file
    # Or just save the DICOM file in the folder of interest
    # Not a big problem, sort out another day

    # Check the potential types of DICOM files
    if (
        "Image" in initial_dicom_sop_class_name
        and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
    ):
        # Load as an primary image
        sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
        try:
            image = sitk.ReadImage(sorted_file_list)
        except RuntimeError:
            logger.warning(" Could not read image into SimpleITK.")
            logger.info(" Processing files individually.")
            # BUGFIX: this previously used "return process_dicom_series(...)",
            # which (a) only considered the first file and (b) discarded the
            # recursive generator entirely — a "return <value>" inside a
            # generator only sets StopIteration.value, so nothing was ever
            # yielded. Delegate with "yield from" for every file instead.
            for dicom_file in dicom_file_list:
                yield from process_dicom_series(
                    dicom_series_dict,
                    series_uid,
                    parent_sorting_field=parent_sorting_field,
                    return_extra=return_extra,
                    individual_file=dicom_file,
                    initial_sop_class_name_default=initial_sop_class_name_default,
                )
            return

        dicom_file_metadata = {
            "parent_sorting_data": parent_sorting_data,
            "study_uid": study_uid,
        }

        # ! TO DO - integrity check
        # Read in all the files here, check the slice location and determine if any are missing
        if initial_dicom.Modality == "PT":
            # scaling_factor = get_suv_bw_scale_factor(initial_dicom)
            # image *= scaling_factor
            # !TO DO
            # Work on PET SUV conversion
            pass

        # ! CHECKPOINT
        # Some DCE MRI sequences have the same series UID
        # Here we check the sequence name, and split if necessary
        if initial_dicom.Modality == "MR":

            try:
                sequence_names = np.unique(
                    [pydicom.read_file(x).SequenceName for x in dicom_file_list]
                )

                sequence_dict = {}
                for dcm_name in dicom_file_list:
                    dcm_obj = pydicom.read_file(dcm_name)
                    var = dcm_obj.SequenceName
                    if var not in sequence_dict.keys():
                        sequence_dict[var] = [dcm_name]
                    else:
                        sequence_dict[var].append(dcm_name)

            except AttributeError:
                try:
                    logger.warning(
                        " MRI sequence name not found. The SeriesDescription will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
                    )

                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.SeriesDescription
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)

                except AttributeError:
                    logger.warning(
                        " MRI SeriesDescription not found. The AcquisitionComments will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
                    )

                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.AcquisitionComments
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)

            if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
                # GE use the DICOM tag (0019, 10a2) [Raw data run number]
                # in Diffusion weighted MRI sequences
                # We need to separate this out to get the difference sequences
                if initial_dicom.SeriesDescription == "Diffusion Weighted":
                    # num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
                    # number_of_images / images_per_seq
                    num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value
                    sequence_names = np.unique(
                        [
                            f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
                            for x in dicom_file_list
                        ]
                    )

                    sequence_name_index_dict = {
                        name: index for index, name in enumerate(sequence_names)
                    }

                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
                        var_to_index = sequence_name_index_dict[var]

                        if var_to_index not in sequence_dict.keys():
                            sequence_dict[var_to_index] = [dcm_name]
                        else:
                            sequence_dict[var_to_index].append(dcm_name)

                    sequence_names = sorted(sequence_dict.keys())

            # BUGFIX: np.alen was deprecated in NumPy 1.18 and removed in
            # NumPy >= 1.23; len() works for both ndarray and list.
            if len(sequence_names) > 1:
                logger.warning(" Two MR sequences were found under a single series UID.")
                logger.warning(" These will be split into separate images.")

                # Split up the DICOM file list by sequence name
                for sequence_name in sequence_names:

                    dicom_file_list_by_sequence = sequence_dict[sequence_name]

                    logger.info(sequence_name)
                    logger.info(len(dicom_file_list_by_sequence))

                    sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)

                    initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)

                    image_by_sequence = sitk.ReadImage(sorted_file_list)

                    dicom_file_metadata_by_sequence = {
                        "parent_sorting_data": parent_sorting_data,
                        "study_uid": study_uid,
                    }

                    yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
                return  # Stop iteration

        yield "IMAGES", dicom_file_metadata, initial_dicom, image

    if "Structure" in initial_dicom_sop_class_name:
        # Load as an RT structure set
        # This should be done individually for each file
        logger.info(f" Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)

            # We must also read in the corresponding DICOM image
            # This can be found by matching the references series UID to the series UID

            # ! TO DO
            # What happens if there is an RT structure set with different referenced sequences?

            # Get the "ReferencedFrameOfReferenceSequence", first item
            referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]

            # Get the "RTReferencedStudySequence", first item
            # This retrieves the study UID
            # This might be useful, but would typically match the actual StudyInstanceUID in the
            # DICOM object
            rt_referenced_series_item = (
                referenced_frame_of_reference_item.RTReferencedStudySequence[0]
            )

            # Get the "RTReferencedSeriesSequence", first item
            # This retreives the actual referenced series UID, which we need to match imaging
            # parameters
            rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
                0
            ]

            # Get the appropriate series instance UID
            image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
            logger.info(f" Item {index}: Matched SeriesInstanceUID = {image_series_uid}")

            # Read in the corresponding image
            sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
            image = sitk.ReadImage(sorted_file_list)

            initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)

            (
                structure_name_list,
                structure_image_list,
            ) = transform_point_set_from_dicom_struct(image, dicom_object)

            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
                "structure_name_list": structure_name_list,
            }

            yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list

    if "Dose" in initial_dicom_sop_class_name:
        # Load as an RT Dose distribution
        # This should be done individually for each file
        logger.info(f" Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)

            # ! CHECKPOINT
            # There should only be a single RT dose file (with each series UID)
            # If there are more, yield each
            initial_dicom = pydicom.read_file(dicom_file, force=True)

            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
            }

            # We must read in as a float otherwise when we multiply by one later it will not work!
            raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)

            dose_grid_scaling = dicom_object.DoseGridScaling

            logger.debug(f" Dose grid scaling: {dose_grid_scaling} Gy")

            scaled_dose_image = raw_dose_image * dose_grid_scaling

            yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image

    # ! TO DO
    # 1. (DONE) Implement conversion of dose files (to NIFTI images)
    # 2. Implement conversion of RT plan files to text dump
    # 3. Do something with other files (e.g. Deformable Image Registration stuff)

    return
def write_output_data_to_disk(
    output_data_dict,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
):
    """
    Write converted output (SimpleITK images) to disk.

    Args:
        output_data_dict (dict): mapping produced by process_dicom_directory:
            {"parent_sorting_data": str, "IMAGES": {name: image_or_list}, ...}
        output_directory (str, optional): root directory for the written files.
        output_file_suffix (str, optional): file suffix, defines the image format.
        overwrite_existing_files (bool, optional): overwrite files already on disk.

    Returns:
        dict | None: {field: [pathlib.Path, ...]} of the intended output files,
        or None when there is nothing to write. Paths are recorded even when an
        existing file is deliberately not overwritten.
    """
    if output_data_dict is None:
        return

    filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
    parent_sorting_data = output_data_dict["parent_sorting_data"]

    files_written = {}

    def _clean_filename(name):
        # Collapse repeated underscores and strip trailing ones.
        # Using endswith() avoids the IndexError the old name[-1] check raised
        # on a name that cleans down to the empty string.
        while "__" in name:
            name = name.replace("__", "_")
        while name.endswith("_"):
            name = name[:-1]
        return name

    def _write_image(field, field_filename, file_to_write):
        # Build the output path, record it, and write the image unless an
        # existing file should be preserved.
        output_name = (
            pathlib.Path(output_directory)
            / parent_sorting_data
            / field
            / (_clean_filename(field_filename) + output_file_suffix)
        )
        files_written[field].append(output_name)

        if output_name.is_file():
            logger.warning(f" File exists: {output_name}")
            if overwrite_existing_files:
                logger.warning(" You have selected to overwrite existing files.")
            else:
                logger.info(
                    " You have selected to NOT overwrite existing files. Continuing."
                )
                return
        sitk.WriteImage(file_to_write, output_name.as_posix())

    for field in filename_fields:
        logger.info(f" Writing files for field: {field}")
        p = pathlib.Path(output_directory) / parent_sorting_data / field
        p.mkdir(parents=True, exist_ok=True)
        files_written[field] = []

        for field_filename_base, field_list in output_data_dict[field].items():
            # A list of images under one name (duplicate-named output) gets an
            # index appended to each file; a single image is written as-is.
            if isinstance(field_list, (tuple, list)):
                for suffix, file_to_write in enumerate(flatten(field_list)):
                    _write_image(field, field_filename_base + f"_{suffix}", file_to_write)
            else:
                _write_image(field, field_filename_base, field_list)

    return files_written
def process_dicom_directory(
    dicom_directory,
    parent_sorting_field="PatientName",
    output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
    output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
    output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
    return_extra=True,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
    write_to_disk=True,
    verbose=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Sort, convert and (optionally) write to disk all DICOM data found in one or
    more directories.

    Args:
        dicom_directory (str | pathlib.Path | iterable): a directory, or an
            iterable of directories, searched recursively for .dcm/.dc3 files.
        parent_sorting_field (str, optional): DICOM attribute used for top-level
            sorting (e.g. PatientName, PatientID).
        output_image_name_format (str, optional): format template for image names.
        output_structure_name_format (str, optional): format template for
            structure names.
        output_dose_name_format (str, optional): format template for dose names.
        return_extra (bool, optional): include extra descriptive information in
            generated names.
        output_directory (str, optional): root directory for written files.
        output_file_suffix (str, optional): output file suffix (image format).
        overwrite_existing_files (bool, optional): overwrite files on disk.
        write_to_disk (bool, optional): if False, return the converted data in
            memory instead of writing it.
        verbose (bool, optional): verbose logging while sorting files.
        initial_sop_class_name_default (str, optional): fallback SOP class name.

    Returns:
        dict | None: {parent_sorting_data: files_written or converted data},
        or None when no valid DICOM data is found.

    Raises:
        ValueError: if dicom_directory is neither a path nor an iterable of paths.
    """

    def _find_dicom_files(root):
        # Recursively find files ending with .dcm or .dc3 (case-insensitive)
        return [
            p
            for p in pathlib.Path(root).glob("**/*")
            if p.name.lower().endswith((".dcm", ".dc3"))
        ]

    # Check dicom_directory type
    if isinstance(dicom_directory, (str, pathlib.Path)):
        # Get all the DICOM files in the given directory
        dicom_file_list = _find_dicom_files(dicom_directory)
    elif hasattr(dicom_directory, "__iter__"):
        # Get all the DICOM files in each given directory
        dicom_file_list = []
        for dicom_dir in dicom_directory:
            dicom_file_list += _find_dicom_files(dicom_dir)
    else:
        # BUG FIX: previously an unsupported type left dicom_file_list unbound,
        # so the code below failed with an obscure NameError.
        raise ValueError(
            "dicom_directory must be a str, pathlib.Path, or an iterable of these."
        )

    if len(dicom_file_list) == 0:
        logger.info("No DICOM files found in input directory. Exiting now.")
        return

    # Process the DICOM files
    # This returns a dictionary (of dictionaries):
    # {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
    #                                   series_UID_2: [list_of_DICOM_files], ...},
    #  parent_data_2: {...},
    #  ...}
    dicom_series_dict_parent = process_dicom_file_list(
        dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
    )

    if dicom_series_dict_parent is None:
        logger.info("No valid DICOM files found. Ending.")
        return None

    # Fields supplied by this function rather than read from the DICOM header
    special_name_fields = [
        "parent_sorting_data",
        "study_uid_index",
        "image_desc",
        "structure_name",
    ]

    # Map each DICOM data type to its user-supplied name template
    name_format_by_type = {
        "IMAGES": output_image_name_format,
        "STRUCTURES": output_structure_name_format,
        "DOSES": output_dose_name_format,
    }

    def _template_fields(name_format):
        # Extract the replacement-field names from a str.format template
        return [i[i.find("{") + 1 :] for i in name_format.split("}") if len(i) > 0]

    def _insert_output(type_dict, output_name, data, duplicate_message):
        # Store converted data under its output name; entries sharing a name
        # are collected into a list.
        # BUG FIX: the IMAGES/STRUCTURES branches previously decided whether to
        # wrap the existing entry with hasattr(existing, "__iter__"), which
        # double-wrapped existing lists and failed to wrap single images.
        # We now wrap exactly when the entry is not yet a list, matching the
        # (correct) original DOSES behaviour.
        if output_name not in type_dict:
            type_dict[output_name] = data
        else:
            logger.info(duplicate_message)
            if not isinstance(type_dict[output_name], list):
                type_dict[output_name] = [type_dict[output_name]]
            type_dict[output_name].append(data)

    output = {}

    for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
        logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
        logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")

        # Stores the converted SimpleITK images keyed by output file name
        output_data_dict = {}

        # Maps each study instance UID to a small integer index; this helps
        # match structure sets to the relevant images, and paired images
        # (e.g. PET/CT) to each other
        study_uid_dict = {}

        # Give some user feedback
        logger.debug(f" Output image name format: {output_image_name_format}")
        logger.debug(f" Output structure name format: {output_structure_name_format}")
        logger.debug(f" Output dose name format: {output_dose_name_format}")

        # For each unique series UID, process the DICOM files
        for series_uid in dicom_series_dict.keys():
            # process_dicom_series yields four values:
            # 1. dicom_type: IMAGES, STRUCTURES, DOSES, ...
            # 2. dicom_file_metadata: special metadata from the DICOM header
            # 3. initial_dicom: the first DICOM dataset in the series
            # 4. dicom_file_data: the converted SimpleITK data
            for (
                dicom_type,
                dicom_file_metadata,
                initial_dicom,
                dicom_file_data,
            ) in process_dicom_series(
                dicom_series_dict=dicom_series_dict,
                series_uid=series_uid,
                parent_sorting_field=parent_sorting_field,
                return_extra=return_extra,
                initial_sop_class_name_default=initial_sop_class_name_default,
            ):
                # Step 1
                # Check the parent sorting field is consistent
                # (usually PatientName, PatientID, or similar)
                parent_sorting_data = dicom_file_metadata["parent_sorting_data"]

                if "parent_sorting_data" not in output_data_dict:
                    output_data_dict["parent_sorting_data"] = parent_sorting_data
                elif parent_sorting_data != output_data_dict["parent_sorting_data"]:
                    logger.error(
                        f"A conflict was found for the parent sorting field "
                        f"({parent_sorting_field}): {parent_sorting_data}"
                    )
                    logger.error("Quitting now.")
                    logger.error(f"Parent data found: {list(dicom_series_dict_parent.keys())}")
                    sys.exit()
                else:
                    logger.info(
                        f" Parent sorting field ({parent_sorting_field}) match found: "
                        f"{parent_sorting_data}"
                    )

                # Step 2
                # Get the study UID, used for indexing DICOM series
                study_uid = dicom_file_metadata["study_uid"]

                if study_uid not in study_uid_dict:
                    # Assign the next free index (0 for an empty dict)
                    study_uid_index = max(study_uid_dict.values(), default=-1) + 1
                    logger.info(f" Setting study instance UID index: {study_uid_index}")
                    study_uid_dict[study_uid] = study_uid_index
                else:
                    logger.info(
                        f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
                    )

                # Step 3
                # Generate names for output files
                # Get the image description (other special fields come from above)
                image_desc = get_dicom_info_from_description(
                    initial_dicom, return_extra=return_extra
                )

                # Fields in the template that must be read from the DICOM header
                all_naming_fields = _template_fields(name_format_by_type[dicom_type])
                dicom_header_tags = [
                    i for i in all_naming_fields if i not in special_name_fields
                ]

                naming_info_dict = {}
                for dicom_field in dicom_header_tags:
                    try:
                        dicom_field_value = initial_dicom[dicom_field].value
                    except (AttributeError, KeyError):
                        logger.warning(
                            f" Could not find DICOM header {dicom_field}. Setting as 0 to "
                            f"preserve naming convention."
                        )
                        dicom_field_value = 0
                    naming_info_dict[dicom_field] = dicom_field_value

                if dicom_type == "IMAGES":
                    output_name = output_image_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        image_desc=image_desc,
                        **naming_info_dict,
                    )
                    _insert_output(
                        output_data_dict.setdefault("IMAGES", {}),
                        output_name,
                        dicom_file_data,
                        " An image with this name exists, appending.",
                    )
                elif dicom_type == "STRUCTURES":
                    # One output image per structure in the structure set
                    for structure_name, structure_image in zip(
                        dicom_file_metadata["structure_name_list"], dicom_file_data
                    ):
                        output_name = output_structure_name_format.format(
                            parent_sorting_data=parent_sorting_data,
                            study_uid_index=study_uid_dict[study_uid],
                            image_desc=image_desc,
                            structure_name=structure_name,
                            **naming_info_dict,
                        )
                        _insert_output(
                            output_data_dict.setdefault("STRUCTURES", {}),
                            output_name,
                            structure_image,
                            " A structure with this name exists, appending.",
                        )
                elif dicom_type == "DOSES":
                    output_name = output_dose_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        **naming_info_dict,
                    )
                    _insert_output(
                        output_data_dict.setdefault("DOSES", {}),
                        output_name,
                        dicom_file_data,
                        " An image with this name exists, appending.",
                    )

        if write_to_disk:
            output[str(parent_data)] = write_output_data_to_disk(
                output_data_dict=output_data_dict,
                output_directory=output_directory,
                output_file_suffix=output_file_suffix,
                overwrite_existing_files=overwrite_existing_files,
            )
        else:
            output[str(parent_data)] = output_data_dict

    return output
# ---- (chunk-separator artifact removed) ----
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation

    Args:
        contour_data_list (list): The contour data for each slice, a flat
            sequence [x0, y0, z0, x1, y1, z1, ...]. A missing coordinate is
            represented by an empty string.

    Returns:
        contour_data (numpy array): Interpolated contour data
    """
    contour_data = np.array(contour_data_list)

    # BUG FIX: the original test was `contour_data.any() == ""`, which compares
    # the boolean result of any() with a string and can never be True, so
    # missing data was silently passed through.
    if np.any(contour_data == ""):
        logger.warning(" Missing values detected.")
        missing_values = np.where(contour_data == "")[0]
        if missing_values.shape[0] > 1:
            logger.warning(" More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning(" Only one value missing.")
            missing_index = missing_values[0]
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning(" Missing value in x axis: interpolating.")
                # BUG FIX: `>=` so the final point wraps around instead of
                # indexing past the end of the array in the branch below.
                if missing_index >= len(contour_data) - 3:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                # BUG FIX: the array has a string dtype when a value is missing,
                # so convert the neighbours to float before averaging.
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            elif missing_axis == 1:
                logger.warning(" Missing value in y axis: interpolating.")
                # BUG FIX: `>=` (see x axis above)
                if missing_index >= len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            else:
                logger.warning(" Missing value in z axis: taking slice value")
                # All z values on a closed planar contour share one slice, so
                # substitute the minimum of the remaining (valid) z entries.
                temp = contour_data[2::3].tolist()
                temp.remove("")
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))

    return contour_data
# ---- (chunk-separator artifacts "| 199", "| 249", "|" removed) ----
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
    """
    Recursively flatten an arbitrarily nested iterable, yielding its leaves.

    Strings, bytes and SimpleITK images are treated as atomic (yielded whole)
    even though they are iterable or container-like.

    Args:
        itr: any object; nested iterables are traversed depth-first.

    Yields:
        The leaf items.
    """
    # Use isinstance rather than an exact type() comparison so that subclasses
    # (e.g. a str subclass) are also treated as atomic leaves.
    if isinstance(itr, (str, bytes, sitk.Image)):
        yield itr
    else:
        for x in itr:
            try:
                yield from flatten(x)
            except TypeError:
                # x is not iterable: it is a leaf
                yield x
def get_suv_bw_scale_factor(ds):
    """Return a body-weight SUV scale factor for a PET DICOM dataset.

    Modified from
    https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf

    Args:
        ds: a pydicom dataset for a PT (PET) image.

    Returns:
        float: factor to multiply raw pixel values by to obtain SUVbw.
    """
    if ds.Units == "CNTS":
        # Try to find the Philips private scale factor
        return float(ds[0x7053, 0x1000].value)

    # Happy-path preconditions: decay- and attenuation-corrected BQML data,
    # decay-corrected to the series start time.
    assert ds.Modality == "PT"
    assert "DECY" in ds.CorrectedImage
    assert "ATTN" in ds.CorrectedImage
    assert "START" in ds.DecayCorrection
    assert ds.Units == "BQML"

    half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)

    if "SeriesTime" in ds:
        series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
        if "." in series_date_time:
            # Drop fractional seconds so strptime with %H%M%S succeeds
            series_date_time = series_date_time[
                : -(len(series_date_time) - series_date_time.index("."))
            ]
        series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")

    # NOTE(review): this guard re-tests "SeriesTime" rather than the
    # radiopharmaceutical start time — presumably a copy-paste; confirm.
    # If "SeriesTime" is absent, neither series_date_time nor start_time is
    # bound and the decay_time computation below raises NameError.
    if "SeriesTime" in ds:
        start_time = (
            ds.SeriesDate
            + "_"
            + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
        )
        if "." in start_time:
            # Drop fractional seconds (as above)
            start_time = start_time[: -(len(start_time) - start_time.index("."))]
        start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")

    # Decay-correct the injected dose from injection time to acquisition time
    decay_time = (series_date_time - start_time).seconds
    injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
    decayed_dose = injected_dose * pow(2, -decay_time / half_life)
    patient_weight = float(ds.PatientWeight)
    # SUVbw: patient weight in grams divided by the decayed dose
    suv_bw_scale_factor = patient_weight * 1000 / decayed_dose
    return suv_bw_scale_factor
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
    """
    Attempts to return some information from a DICOM
    This is typically used for naming converted NIFTI files

    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): fallback SOP class name used when the
            SOP Class UID cannot be read from the object

    Returns:
        info (str): Some extracted information; empty string when nothing
        useful can be extracted.
    """
    try:
        dicom_sop_class_name = dicom_object.SOPClassUID.name
    except AttributeError:
        logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
        dicom_sop_class_name = sop_class_name

    if "Image" not in dicom_sop_class_name:
        # Not an image (e.g. RT Structure Set, RT Dose): nothing to extract.
        # BUG FIX: the original fell off the end and returned None, which
        # rendered as the literal "None" inside generated file names.
        return ""

    # Get the modality
    image_modality = dicom_object.Modality
    logger.info(f" Image modality: {image_modality}")

    if image_modality == "CT":
        # There is typically not much extra information
        # At the moment, we do not return anything for CT imaging
        if return_extra:
            try:
                protocol_name = dicom_object.ProtocolName
                if protocol_name != "":
                    return re.sub(r"[^\w]", "_", protocol_name).upper()
            except AttributeError:
                logger.warning(" Could not find ProtocolName")
        return ""

    if image_modality == "MR":
        # Not much consistency, but we can get the protocol name
        try:
            protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
        except AttributeError:
            logger.warning(" Could not find ProtocolName")
            protocol_name = ""
        try:
            sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
        except AttributeError:
            logger.warning(" Could not find SequenceName")
            sequence_name = ""
        try:
            series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
        except AttributeError:
            # BUG FIX: this warning previously said "SequenceName" (copy-paste)
            logger.warning(" Could not find SeriesDescription")
            series_description = ""

        combined_name = "_".join([protocol_name, sequence_name, series_description])
        while "__" in combined_name:
            combined_name = combined_name.replace("__", "_")

        if protocol_name != "" and not return_extra:
            return protocol_name
        return combined_name

    if image_modality == "PT":
        # Report whether attenuation correction has been applied
        try:
            corrections = dicom_object.CorrectedImage
        except AttributeError:
            corrections = "NONE"
        return "AC" if "ATTN" in corrections else "NAC"

    # Unknown image modality: nothing useful to extract
    return ""
def safe_sort_dicom_image_list(dicom_image_list):
    """
    Sorts a list of DICOM image files based on DICOM tag values.
    This is a much safer method than reading SliceLocation.
    It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
    The list of DICOM files is sorted by projecting the image position onto the axis normal to the
    plane defined by the image orientation.
    This accounts for differences in patient position (e.g. HFS/FFS).

    Args:
        dicom_image_list (list): DICOM file paths belonging to one image series.

    Returns:
        list: the file paths sorted by position along the slice normal.
    """
    projected_location = {}
    for dicom_file in dicom_image_list:
        dcm = pydicom.read_file(dicom_file, force=True)

        image_position = np.array(dcm.ImagePositionPatient, dtype=float)
        image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)

        # Slice normal = row direction x column direction
        image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])

        # BUG FIX: project with a full dot product. The previous code took only
        # the z component of the element-wise product, which works for axial
        # series but produces a constant (zero) sort key for sagittal/coronal
        # series, leaving them effectively unsorted.
        slice_location = float(np.dot(image_position, image_plane_normal))

        projected_location[dicom_file] = slice_location

    return sorted(dicom_image_list, key=projected_location.get)
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation

    Args:
        contour_data_list (list): The contour data for each slice, a flat
            sequence [x0, y0, z0, x1, y1, z1, ...]. A missing coordinate is
            represented by an empty string.

    Returns:
        contour_data (numpy array): Interpolated contour data
    """
    contour_data = np.array(contour_data_list)

    # BUG FIX: the original test was `contour_data.any() == ""`, which compares
    # the boolean result of any() with a string and can never be True, so
    # missing data was silently passed through.
    if np.any(contour_data == ""):
        logger.warning(" Missing values detected.")
        missing_values = np.where(contour_data == "")[0]
        if missing_values.shape[0] > 1:
            logger.warning(" More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning(" Only one value missing.")
            missing_index = missing_values[0]
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning(" Missing value in x axis: interpolating.")
                # BUG FIX: `>=` so the final point wraps around instead of
                # indexing past the end of the array in the branch below.
                if missing_index >= len(contour_data) - 3:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                # BUG FIX: the array has a string dtype when a value is missing,
                # so convert the neighbours to float before averaging.
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            elif missing_axis == 1:
                logger.warning(" Missing value in y axis: interpolating.")
                # BUG FIX: `>=` (see x axis above)
                if missing_index >= len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            else:
                logger.warning(" Missing value in z axis: taking slice value")
                # All z values on a closed planar contour share one slice, so
                # substitute the minimum of the remaining (valid) z entries.
                temp = contour_data[2::3].tolist()
                temp.remove("")
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))

    return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
    """
    This function is used to generate a binary mask from a set of vertices.
    This allows us to convert from DICOM-RTStruct format to any imaging format.

    Args:
        image ([SimpleITK.Image]): The image, used to copy imaging information
            (e.g. resolution, spacing)
        dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
        spacing_override (bool | tuple, optional): Overwrite the spacing.
            Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.

    Returns:
        list, list : final_struct_name_sequence, structure_list
    """
    if spacing_override:
        # Only non-zero override entries replace the image spacing; zeros keep
        # the reference image's spacing for that axis.
        current_spacing = list(image.GetSpacing())
        new_spacing = tuple(
            [
                current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
                for k in range(3)
            ]
        )
        image.SetSpacing(new_spacing)

    struct_point_sequence = dicom_struct.ROIContourSequence
    # Structure names with internal whitespace joined by underscores
    struct_name_sequence = [
        "_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
    ]

    structure_list = []
    final_struct_name_sequence = []

    for structIndex, structure_name in enumerate(struct_name_sequence):
        # Binary mask accumulated slice by slice (numpy axis order: z, y, x)
        image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
        logger.info(
            " Converting structure {0} with name: {1}".format(structIndex, structure_name)
        )

        # Skip structures that have no usable closed-planar contour data
        if structIndex >= len(struct_point_sequence):
            logger.warning(" Contour sequence is missing, skipping.")
            continue

        if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
            logger.warning(" No contour sequence found for this structure, skipping.")
            continue

        if len(struct_point_sequence[structIndex].ContourSequence) == 0:
            logger.warning(" Contour sequence is empty, skipping.")
            continue

        if (
            not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
            == "CLOSED_PLANAR"
        ):
            logger.warning(" This is not a closed planar structure, skipping.")
            continue

        for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
            # Repair a single missing coordinate, if any
            contour_data = fix_missing_data(
                struct_point_sequence[structIndex].ContourSequence[sl].ContourData
            )

            struct_slice_contour_data = np.array(contour_data, dtype=np.double)
            # Reshape the flat [x, y, z, x, y, z, ...] list into N x 3 vertices
            vertexArr_physical = struct_slice_contour_data.reshape(
                struct_slice_contour_data.shape[0] // 3, 3
            )
            # Convert physical vertices to image (voxel) indices
            point_arr = np.array(
                [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
            ).T
            [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]

            zIndex = point_arr[2][0]
            # A closed planar contour must lie entirely on one axial slice
            if np.any(point_arr[2] != zIndex):
                logger.error(" Axial slice index varies in contour. Quitting now.")
                logger.error(" Structure: {0}".format(structure_name))
                logger.error(" Slice index: {0}".format(zIndex))
                quit()

            if zIndex >= image.GetSize()[2]:
                logger.warning(" Slice index greater than image size. Skipping slice.")
                logger.warning(" Structure: {0}".format(structure_name))
                logger.warning(" Slice index: {0}".format(zIndex))
                continue

            # Rasterise the polygon onto this slice and accumulate the mask
            sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
            filledIndicesX, filledIndicesY = polygon(
                xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
            )
            sliceArr[filledIndicesX, filledIndicesY] = 1
            image_blank[zIndex] += sliceArr.T

        # Collapse any overlapping contour accumulation back to a binary mask
        struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
        struct_image.CopyInformation(image)
        structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
        # Clean the structure name for use in file names
        structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
        while "__" in structure_name_clean:
            structure_name_clean = structure_name_clean.replace("__", "_")
        final_struct_name_sequence.append(structure_name_clean)

    return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
    """
    Group a list of DICOM files, first by the value of the parent sorting
    field (e.g. PatientName) and then by SeriesInstanceUID.

    Args:
        dicom_file_list (list): pathlib.Path objects pointing at DICOM files.
        parent_sorting_field (str, optional): DICOM attribute for top-level grouping.
        verbose (bool, optional): log each file as it is sorted.

    Returns:
        dict: {parent_value: {series_uid: [file_path, ...]}}
    """
    grouped = {}

    for file_index, dicom_file in enumerate(sorted(dicom_file_list)):
        if verbose is True:
            logger.debug(f" Sorting file {file_index}")

        dicom_file = dicom_file.as_posix()

        # DICOMDIR index files are skipped; the images themselves are read
        if "dicomdir" in dicom_file.lower():
            logger.warning(
                "DICOMDIR is not supported in this tool, images are read directly. Skipping."
            )
            continue

        dicom_object = pydicom.read_file(dicom_file, force=True)

        sorting_value = dicom_object[parent_sorting_field].value
        series_uid = dicom_object.SeriesInstanceUID

        # setdefault chain replaces the original nested if/else bookkeeping
        grouped.setdefault(sorting_value, {}).setdefault(series_uid, []).append(dicom_file)

    return grouped
def process_dicom_series(
dicom_series_dict,
series_uid,
parent_sorting_field="PatientName",
return_extra=True,
individual_file=False,
initial_sop_class_name_default="UNKNOWN",
):
if not individual_file:
logger.info(f" Processing series UID: {series_uid}")
dicom_file_list = dicom_series_dict[series_uid]
else:
logger.info(f" Processing individual file: {individual_file}")
dicom_file_list = [individual_file]
logger.info(f" Number of DICOM files: {len(dicom_file_list)}")
initial_dicom = pydicom.read_file(dicom_file_list[0])
# Get the data in the parent sorting field, clean with RegEx
parent_sorting_data = re.sub(
r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
).upper()
if parent_sorting_data == "":
logger.error(
f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
)
"""
! TO DO
Implement a routine to let a user correlate a root directory with a name
"""
parent_sorting_data = "TEMP"
try:
initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
except AttributeError:
logger.warning(
f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
)
initial_dicom_sop_class_name = initial_sop_class_name_default
try:
study_uid = initial_dicom.StudyInstanceUID
except AttributeError:
study_uid = "00001"
"""
! TO DO
Need to check for secondary capture image storage
This can include JPEGs with written information on them
This is typically not very useful
We can dump it to file
Or just save the DICOM file in the folder of interest
Not a big problem, sort out another day
"""
# Check the potential types of DICOM files
if (
"Image" in initial_dicom_sop_class_name
and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
):
# Load as an primary image
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
try:
image = sitk.ReadImage(sorted_file_list)
except RuntimeError:
logger.warning(" Could not read image into SimpleITK.")
logger.info(" Processing files individually.")
for dicom_file in dicom_file_list:
return process_dicom_series(
dicom_series_dict,
series_uid,
parent_sorting_field=parent_sorting_field,
return_extra=return_extra,
individual_file=dicom_file,
initial_sop_class_name_default=initial_sop_class_name_default,
)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
"""
! TO DO - integrity check
Read in all the files here, check the slice location and determine if any are missing
"""
if initial_dicom.Modality == "PT":
# scaling_factor = get_suv_bw_scale_factor(initial_dicom)
# image *= scaling_factor
# !TO DO
# Work on PET SUV conversion
None
"""
! CHECKPOINT
Some DCE MRI sequences have the same series UID
Here we check the sequence name, and split if necessary
"""
if initial_dicom.Modality == "MR":
try:
sequence_names = np.unique(
[pydicom.read_file(x).SequenceName for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SequenceName
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
try:
logger.warning(
" MRI sequence name not found. The SeriesDescription will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SeriesDescription
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
logger.warning(
" MRI SeriesDescription not found. The AcquisitionComments will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.AcquisitionComments
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
# GE use the DICOM tag (0019, 10a2) [Raw data run number]
# in Diffusion weighted MRI sequences
# We need to separate this out to get the difference sequences
if initial_dicom.SeriesDescription == "Diffusion Weighted":
# num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
# number_of_images / images_per_seq
num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value
sequence_names = np.unique(
[
f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
for x in dicom_file_list
]
)
sequence_name_index_dict = {
name: index for index, name in enumerate(sequence_names)
}
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
var_to_index = sequence_name_index_dict[var]
if var_to_index not in sequence_dict.keys():
sequence_dict[var_to_index] = [dcm_name]
else:
sequence_dict[var_to_index].append(dcm_name)
sequence_names = sorted(sequence_dict.keys())
if np.alen(sequence_names) > 1:
logger.warning(" Two MR sequences were found under a single series UID.")
logger.warning(" These will be split into separate images.")
# Split up the DICOM file list by sequence name
for sequence_name in sequence_names:
dicom_file_list_by_sequence = sequence_dict[sequence_name]
logger.info(sequence_name)
logger.info(len(dicom_file_list_by_sequence))
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
image_by_sequence = sitk.ReadImage(sorted_file_list)
dicom_file_metadata_by_sequence = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
return # Stop iteration
yield "IMAGES", dicom_file_metadata, initial_dicom, image
if "Structure" in initial_dicom_sop_class_name:
# Load as an RT structure set
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
# We must also read in the corresponding DICOM image
# This can be found by matching the references series UID to the series UID
"""
! TO DO
What happens if there is an RT structure set with different referenced sequences?
"""
# Get the "ReferencedFrameOfReferenceSequence", first item
referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]
# Get the "RTReferencedStudySequence", first item
# This retrieves the study UID
# This might be useful, but would typically match the actual StudyInstanceUID in the
# DICOM object
rt_referenced_series_item = (
referenced_frame_of_reference_item.RTReferencedStudySequence[0]
)
# Get the "RTReferencedSeriesSequence", first item
# This retreives the actual referenced series UID, which we need to match imaging
# parameters
rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
0
]
# Get the appropriate series instance UID
image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
logger.info(f" Item {index}: Matched SeriesInstanceUID = {image_series_uid}")
# Read in the corresponding image
sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
image = sitk.ReadImage(sorted_file_list)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
(
structure_name_list,
structure_image_list,
) = transform_point_set_from_dicom_struct(image, dicom_object)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
"structure_name_list": structure_name_list,
}
yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list
if "Dose" in initial_dicom_sop_class_name:
# Load as an RT Dose distribution
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
"""
! CHECKPOINT
There should only be a single RT dose file (with each series UID)
If there are more, yield each
"""
initial_dicom = pydicom.read_file(dicom_file, force=True)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
# We must read in as a float otherwise when we multiply by one later it will not work!
raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)
dose_grid_scaling = dicom_object.DoseGridScaling
logger.debug(f" Dose grid scaling: {dose_grid_scaling} Gy")
scaled_dose_image = raw_dose_image * dose_grid_scaling
yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image
"""
! TO DO
1. (DONE) Implement conversion of dose files (to NIFTI images)
2. Implement conversion of RT plan files to text dump
3. Do something with other files (e.g. Deformable Image Registration stuff)
"""
return
def _clean_field_filename(field_filename):
    """Collapse repeated underscores and strip trailing underscores from a filename stem."""
    while "__" in field_filename:
        field_filename = field_filename.replace("__", "_")
    # rstrip is safe on an empty string, unlike indexing field_filename[-1]
    return field_filename.rstrip("_")


def _write_image_file(file_to_write, output_name, overwrite_existing_files):
    """
    Write a single image to ``output_name``, honouring the overwrite flag.

    If the file already exists and overwriting is disabled, the write is skipped.
    """
    if output_name.is_file():
        logger.warning(f"  File exists: {output_name}")
        if overwrite_existing_files:
            logger.warning("  You have selected to overwrite existing files.")
        else:
            logger.info(
                "  You have selected to NOT overwrite existing files. Continuing."
            )
            return
    sitk.WriteImage(file_to_write, output_name.as_posix())


def write_output_data_to_disk(
    output_data_dict,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
):
    """
    Write the converted image data to disk.

    Parameters
    ----------
    output_data_dict : dict or None
        Mapping with a "parent_sorting_data" entry plus one entry per field
        (e.g. IMAGES/STRUCTURES/DOSES), each mapping filename stems to images
        or lists of images. If None, nothing is written.
    output_directory : str
        Root directory for output; files go under <root>/<parent_sorting_data>/<field>/.
    output_file_suffix : str
        File suffix (and implicitly format) for the written images.
    overwrite_existing_files : bool
        If True, existing files are overwritten; otherwise they are skipped.

    Returns
    -------
    dict or None
        Mapping of field name to the list of output paths (recorded even for
        skipped writes, matching the original bookkeeping), or None if
        output_data_dict was None.
    """
    if output_data_dict is None:
        return
    filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
    parent_sorting_data = output_data_dict["parent_sorting_data"]
    files_written = {}
    for field in filename_fields:
        logger.info(f"  Writing files for field: {field}")
        field_dir = pathlib.Path(output_directory) / parent_sorting_data / field
        field_dir.mkdir(parents=True, exist_ok=True)
        files_written[field] = []
        for field_filename_base, field_list in output_data_dict[field].items():
            # If several images share a name (stored as a list/tuple), append an
            # index suffix to each; otherwise write the single image as-is.
            if isinstance(field_list, (tuple, list)):
                entries = [
                    (field_filename_base + f"_{suffix}", file_to_write)
                    for suffix, file_to_write in enumerate(flatten(field_list))
                ]
            else:
                entries = [(field_filename_base, field_list)]
            for field_filename, file_to_write in entries:
                output_name = field_dir / (
                    _clean_field_filename(field_filename) + output_file_suffix
                )
                # NOTE: the name is recorded even when the write is skipped,
                # preserving the original behaviour of this function.
                files_written[field].append(output_name)
                _write_image_file(file_to_write, output_name, overwrite_existing_files)
    return files_written
def process_dicom_directory(
    dicom_directory,
    parent_sorting_field="PatientName",
    output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
    output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
    output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
    return_extra=True,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
    write_to_disk=True,
    verbose=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Crawl one or more directories for DICOM files and convert their contents.

    Files ending in .dcm/.dc3 are grouped by ``parent_sorting_field`` and series UID
    (via ``process_dicom_file_list``); each series is then converted by
    ``process_dicom_series`` into SimpleITK images, structures, and doses, which are
    named using the given format strings and optionally written to disk.

    Parameters
    ----------
    dicom_directory : str, pathlib.Path, or iterable of these
        Directory (or directories) to search recursively for DICOM files.
    parent_sorting_field : str
        DICOM header field used to group data (e.g. PatientName, PatientID).
    output_image_name_format, output_structure_name_format, output_dose_name_format : str
        Format strings for output names; fields other than the special names
        (parent_sorting_data, study_uid_index, image_desc, structure_name) are
        looked up in the DICOM header and default to 0 when missing.
    return_extra : bool
        Passed through to the description/series processing helpers.
    output_directory : str
        Root directory for written files (when write_to_disk is True).
    output_file_suffix : str
        File suffix for written images.
    overwrite_existing_files : bool
        Whether existing output files may be overwritten.
    write_to_disk : bool
        If True, write converted data and return the written file paths;
        otherwise return the in-memory data dictionaries.
    verbose : bool
        Passed to ``process_dicom_file_list``.
    initial_sop_class_name_default : str
        Fallback SOP class name passed to ``process_dicom_series``.

    Returns
    -------
    dict or None
        Mapping of parent sorting value (as str) to either the written file
        paths or the in-memory output data; None if no valid DICOM files found.
    """
    # Check dicom_directory type
    if isinstance(dicom_directory, str) or isinstance(dicom_directory, pathlib.Path):
        # Get all the DICOM files in the given directory
        root_path = pathlib.Path(dicom_directory)
        # Find files ending with .dcm, .dc3
        dicom_file_list = [
            p
            for p in root_path.glob("**/*")
            if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
        ]
    elif hasattr(dicom_directory, "__iter__"):
        dicom_file_list = []
        for dicom_dir in dicom_directory:
            # Get all the DICOM files in each directory
            root_path = pathlib.Path(dicom_dir)
            # Find files ending with .dcm, .dc3
            dicom_file_list += [
                p
                for p in root_path.glob("**/*")
                if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
            ]
    # NOTE(review): if dicom_directory is neither a str/Path nor iterable,
    # dicom_file_list is never bound and the next line raises NameError — confirm
    # whether an explicit TypeError would be preferable.
    if len(dicom_file_list) == 0:
        logger.info("No DICOM files found in input directory. Exiting now.")
        return
    # Process the DICOM files
    # This returns a dictionary (of dictionaries):
    # {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
    #                                  {series_UID_2: [list_of_DICOM_files], ...
    #  parent_data_2                 : {series_UID_1: [list_of_DICOM_files],
    #                                  {series_UID_2: [list_of_DICOM_files], ...
    #  ...     }
    dicom_series_dict_parent = process_dicom_file_list(
        dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
    )
    if dicom_series_dict_parent is None:
        logger.info("No valid DICOM files found. Ending.")
        return None
    output = {}
    for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
        logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
        logger.info(f"  Number of DICOM series = {len(dicom_series_dict.keys())}")
        # Set up the output data
        # This stores the SimpleITK images and file names
        output_data_dict = {}
        # Set up the study UID dict
        # This helps match structure sets to relevant images
        # And paired images to each other (e.g. PET/CT)
        study_uid_dict = {}
        # Give some user feedback
        logger.debug(f"  Output image name format: {output_image_name_format}")
        logger.debug(f"  Output structure name format: {output_structure_name_format}")
        logger.debug(f"  Output dose name format: {output_dose_name_format}")
        # For each unique series UID, process the DICOM files
        for series_uid in dicom_series_dict.keys():
            # This function returns four values
            # 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc
            # 2. dicom_file_metadata: Some special metadata extracted from the DICOM header
            # 3. initial_dicom: The first DICOM in the series. For doses and structures there is
            #    (usually) only one DICOM anyway
            # 4. dicom_file_data: The actual SimpleITK image data
            for (
                dicom_type,
                dicom_file_metadata,
                initial_dicom,
                dicom_file_data,
            ) in process_dicom_series(
                dicom_series_dict=dicom_series_dict,
                series_uid=series_uid,
                parent_sorting_field=parent_sorting_field,
                return_extra=return_extra,
                initial_sop_class_name_default=initial_sop_class_name_default,
            ):
                # Step 1
                # Check the parent sorting field is consistent
                # This would usually be the PatientName, PatientID, or similar
                # Occasionally these will both be blank
                parent_sorting_data = dicom_file_metadata["parent_sorting_data"]
                if "parent_sorting_data" not in output_data_dict.keys():
                    output_data_dict["parent_sorting_data"] = parent_sorting_data
                else:
                    if parent_sorting_data != output_data_dict["parent_sorting_data"]:
                        logger.error(
                            f"A conflict was found for the parent sorting field "
                            f"({parent_sorting_field}): {parent_sorting_data}"
                        )
                        logger.error("Quitting now.")
                        # NOTE(review): debugging print left in place; a hard exit here
                        # aborts the whole crawl on the first conflict.
                        print(dicom_series_dict_parent.keys())
                        sys.exit()
                    else:
                        logger.info(
                            f"  Parent sorting field ({parent_sorting_field}) match found: "
                            f"{parent_sorting_data}"
                        )
                # Step 2
                # Get the study UID
                # Used for indexing DICOM series
                study_uid = dicom_file_metadata["study_uid"]
                if study_uid not in study_uid_dict.keys():
                    try:
                        # Next index is one past the largest index seen so far
                        study_uid_index = max(study_uid_dict.values()) + 1
                    except AttributeError:
                        study_uid_index = 0  # Study UID dict might not exist
                    except ValueError:
                        study_uid_index = 0  # Study UID dict might be empty
                    logger.info(f"  Setting study instance UID index: {study_uid_index}")
                    study_uid_dict[study_uid] = study_uid_index
                else:
                    logger.info(
                        f"  Study instance UID index already exists: {study_uid_dict[study_uid]}"
                    )
                # Step 3
                # Generate names for output files
                # Special names
                # ! This can be defined once at the start of the function
                special_name_fields = [
                    "parent_sorting_data",
                    "study_uid_index",
                    "image_desc",
                    "structure_name",
                ]
                # Get the image description (other special names are already defined above)
                image_desc = get_dicom_info_from_description(
                    initial_dicom, return_extra=return_extra
                )
                # Get all the fields from the user-given name format
                # NOTE(review): all_naming_fields would be unbound if dicom_type were
                # anything other than IMAGES/STRUCTURES/DOSES — the generator is
                # presumed to yield only these types; verify against process_dicom_series.
                if dicom_type == "IMAGES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_image_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "STRUCTURES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_structure_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "DOSES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_dose_name_format.split("}")
                        if len(i) > 0
                    ]
                # Now exclude those that aren't derived from the DICOM header
                dicom_header_tags = [i for i in all_naming_fields if i not in special_name_fields]
                naming_info_dict = {}
                for dicom_field in dicom_header_tags:
                    try:
                        dicom_field_value = initial_dicom[dicom_field].value
                    except (AttributeError, KeyError):
                        logger.warning(
                            f"  Could not find DICOM header {dicom_field}. Setting as 0 to "
                            f"preserve naming convention."
                        )
                        dicom_field_value = 0
                    naming_info_dict[dicom_field] = dicom_field_value
                if dicom_type == "IMAGES":
                    output_name = output_image_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        image_desc=image_desc,
                        **naming_info_dict,
                    )
                    if "IMAGES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["IMAGES"] = {output_name: dicom_file_data}
                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["IMAGES"].keys():
                            output_data_dict["IMAGES"][output_name] = dicom_file_data
                        else:
                            logger.info("  An image with this name exists, appending.")
                            if hasattr(output_data_dict["IMAGES"][output_name], "__iter__"):
                                output_data_dict["IMAGES"][output_name] = list(
                                    [output_data_dict["IMAGES"][output_name]]
                                )
                            output_data_dict["IMAGES"][output_name].append(dicom_file_data)
                elif dicom_type == "STRUCTURES":
                    for structure_name, structure_image in zip(
                        dicom_file_metadata["structure_name_list"], dicom_file_data
                    ):
                        output_name = output_structure_name_format.format(
                            parent_sorting_data=parent_sorting_data,
                            study_uid_index=study_uid_dict[study_uid],
                            image_desc=image_desc,
                            structure_name=structure_name,
                            **naming_info_dict,
                        )
                        if "STRUCTURES" not in output_data_dict.keys():
                            # Make a new entry
                            output_data_dict["STRUCTURES"] = {output_name: structure_image}
                        else:
                            # First check if there is another structure of the same name
                            if output_name not in output_data_dict["STRUCTURES"].keys():
                                output_data_dict["STRUCTURES"][output_name] = structure_image
                            else:
                                logger.info("  A structure with this name exists, appending.")
                                if hasattr(
                                    output_data_dict["STRUCTURES"][output_name], "__iter__"
                                ):
                                    output_data_dict["STRUCTURES"][output_name] = list(
                                        [output_data_dict["STRUCTURES"][output_name]]
                                    )
                                output_data_dict["STRUCTURES"][output_name].append(structure_image)
                elif dicom_type == "DOSES":
                    output_name = output_dose_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        **naming_info_dict,
                    )
                    if "DOSES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["DOSES"] = {output_name: dicom_file_data}
                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["DOSES"].keys():
                            output_data_dict["DOSES"][output_name] = dicom_file_data
                        else:
                            logger.info("  An image with this name exists, appending.")
                            if isinstance(output_data_dict["DOSES"][output_name], sitk.Image):
                                output_data_dict["DOSES"][output_name] = list(
                                    [output_data_dict["DOSES"][output_name]]
                                )
                            output_data_dict["DOSES"][output_name].append(dicom_file_data)
        if write_to_disk:
            output[str(parent_data)] = write_output_data_to_disk(
                output_data_dict=output_data_dict,
                output_directory=output_directory,
                output_file_suffix=output_file_suffix,
                overwrite_existing_files=overwrite_existing_files,
            )
        else:
            output[str(parent_data)] = output_data_dict
        """
        TO DO!
        Memory issue with output_data_dict
        Use in inner loop, reset output_data_dict
        """
    return output
|
ignore_errors_context
|
Set ignore_errors to the given flag in this context.
Parameters
----------
flag : bool
If not None, set ignore_errors to this value.
Yields
------
None
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Identity check against False is deliberate: only an explicit err=False
    # (not just any falsy value) downgrades the error to a warning.
    warn_instead = (err is None and ignore_errors()) or err is False
    if not warn_instead:
        raise exc(msg)
    issue_warning(msg, category=category)
# MASKED: ignore_errors_context function (lines 93-112)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; retained for backwards compatibility.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in the compact formatter, restoring it afterwards
    # even if warnings.warn raises (e.g. under "error" filters).
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)
    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape
    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was removed in NumPy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape
    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))
    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0: ref0 is the physical value
    when the scaled value is 0, ref is the physical value when it is 1.
    """
    using_refs = ref0 is not None or ref is not None
    if using_refs:
        # Affine scaling cannot be used with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    from unittest.mock import Mock

    # An env var can override the requested optimizer (used on CI machines).
    forced = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if forced:
        optname = forced

    OPT = opt = OPTIMIZER = None

    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some pyoptsparse builds stub missing optimizers with Mock objects.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    # strings are Iterable but must fall through to the TypeError below
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr
    if values is None:
        return val_if_none
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that any named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # Unconditionally claim membership, regardless of the key.
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # Emit the full path first, then drop one trailing component at a time.
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # Fast paths: match-everything wildcard, then an exact-name hit.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [var_name for var_name in var_list if fnmatchcase(var_name, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    label = "'{0}'".format(name) if quotes else '{0}'.format(name)
    # str.ljust pads with trailing spaces up to pad_num and leaves longer
    # strings unchanged, matching the original format-string behaviour.
    return label.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # Always restore stdout, even when the model raises.
        sys.stdout = saved_stdout
    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # Always restore stdout, even when the driver raises.
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    The `as`-clause of the `with`-statement gives the current print options.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # Drop keyword args not supported by this numpy version
    # (e.g. numpy <=1.13 does not have the 'floatmode' option).
    supported = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **supported)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Single-yield generator backing the no-op context manager below.
    yield None


def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    make_ctx = contextmanager(_nothing)
    return make_ctx()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Select the regex for the requested trimming mode.
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are invalid in python identifiers, each mapped to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # One-pass, C-level substitution of every bad character with '_'.
    return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
def make_serializable_key(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function is for making serizializable dictionary keys, so no containers.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # bool is a subclass of int, so native bools pass through here unchanged.
        return o
    elif isinstance(o, complex):
        # BUGFIX/cleanup: the original also tested `isinstance(o, bool)` here, but
        # that branch was unreachable because bools are caught by the int check above.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate every entry before returning anything.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check whether a variable name passes through the includes and excludes filters.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Any matching exclude pattern rejects the name outright.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    # With no include patterns, everything that survived the excludes passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check whether either variable name passes through the includes and excludes filters.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name when it actually differs from the absolute name.
    names = (name, prom_name) if name != prom_name else (name,)

    def _hit(pattern):
        return any(fnmatchcase(n, pattern) for n in names)

    if excludes is not None and any(_hit(pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(_hit(pat) for pat in includes)
# Environment-variable values (compared case-insensitively) considered 'false'.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    An unset variable is treated the same as '0', i.e. not truthy.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.
    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.
    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        minlen = np.min([len(s) for s in splits])
        # Walk the name parts left-to-right until the first position where any
        # two paths disagree.  `common_loc` ends up pointing at that position.
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            for i in range(1, npaths):
                if p0 != splits[i][common_loc]:
                    # mismatch found: break out of both loops, leaving common_loc
                    # at the first differing part
                    break
            else:
                # all paths agree at this position; check the next one
                continue
            break
        else:
            # all `minlen` parts agreed, so the entire shortest path is common
            common_loc += 1
        return '.'.join(splits[0][:common_loc])
    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.
    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape
    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # NOTE(review): negative start/stop values are passed to np.arange as-is, and
        # the reshape requires the slice to select exactly prod(arr_shape) elements —
        # this fast path appears to assume a normalized, full-coverage slice; confirm
        # against callers.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # General case (tuples of slices, ellipsis, ...): build the full index array
        # in the output shape and apply the slicer to it.
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() consumes the (name, meta) pairs from the item iterator directly.
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.
    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.
    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # Degenerate cases: when only one level provides indices, that level is the answer.
    # NOTE(review): parent_src_shape is accepted but never used here — confirm whether
    # callers rely on it or it can eventually be dropped.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # NOTE(review): my_src_inds appears to be an Indexer-like object (has _flat_src,
    # flat(), and is callable) and parent_src_inds provides shaped_array(); confirm
    # against openmdao.utils.indexer.
    if my_src_inds._flat_src:
        # index into the flattened parent indices
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # reshape the parent's indices to this level's expected source shape, then
        # apply this level's (non-flat) indices
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # a scalar becomes a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit connection owner was found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUGFIX: the original tested `tgt in abs2prom['input'][tgt]`, i.e. a substring
    # test against the promoted name (raising KeyError when tgt is absent).  The
    # intent is a dict membership test, matching the src check on the left.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit; find the
            # group whose manual connections declare it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.
    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        save = sys.path
        # temporarily extend sys.path so wingdbstub can be found in WINGHOME
        # (raises KeyError if WING_DBG is set but WINGHOME is not)
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub  # imported only for its debugger side effects
        finally:
            # always restore the original sys.path, even if the import fails
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.
    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.
    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).
    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # determine whether vname is an output or an input and grab the matching
        # metadata, sizes, and vector slices
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # distributed variable: compute this rank's window into the full
            # distributed size.  NOTE(review): sizes appears to be indexed as
            # [rank, var_idx] — confirm against System._var_sizes layout.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # serial variable: iterate a simple contiguous range
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.
        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.
        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate the FULL distributed size; only indices in [start, end) are local
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.
        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
# (removed stray "|" extraction residue that was not valid Python)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.
    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.
    Yields
    ------
    None
    """
    # remember the current setting before overriding it
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the previous setting even if the body raised
        ignore_errors(save)
# (removed stray "| 93", "| 112", "|" residue left over from file concatenation)
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
# Read/written only through the ignore_errors() accessor below; not for direct use.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.
    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.
    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    # acts as a getter when flag is None, and as a setter+getter otherwise
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # warn (rather than raise) when err is explicitly False, or when err is
    # unspecified and errors are currently being ignored
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag within this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    # capture the current setting before overriding it
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the prior setting, even on exception
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.
    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    # deprecated shim kept for backwards compatibility
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the compact formatter, then restore the original
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # BUGFIX: np.prod replaces np.product, which was deprecated and then
            # removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.
    Adder and Scaler are used internally because the transformation is
    slightly more efficient.
    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.
    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.
    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.
    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)  # i.e. scaler = 1 / (ref - ref0)
    else:
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0
    # normalize to float or flat ndarray
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.
    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.
    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.
    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        # NOTE: this rebinds the local name OPT from None to the pyoptsparse factory
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # a Mock here means pyoptsparse itself is mocked out (test environments)
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse not importable at all
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.
    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.
    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.
    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.
    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # Convert adder to ndarray/float as necessary
    if isinstance(values, np.ndarray):
        if flatten:
            values = values.flatten()
    elif not isinstance(values, str) \
            and isinstance(values, Iterable):
        values = np.asarray(values, dtype=float)
        if flatten:
            values = values.flatten()
    elif values is None:
        values = val_if_none
    elif values == float('inf'):
        # clamp infinities to OpenMDAO's finite INF_BOUND sentinel
        values = INF_BOUND
    elif values == -float('inf'):
        values = -INF_BOUND
    elif isinstance(values, numbers.Number):
        values = float(values)
    else:
        raise TypeError('Expected values of {0} to be an Iterable of '
                        'numeric values, or a scalar numeric value. '
                        'Got {1} instead.'.format(name, values))
    return values
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """
    # Stands in for a real container wherever membership tests should always succeed.
    def __contains__(self, name):
        """
        Return if the named object is contained.
        Parameters
        ----------
        name : str
            Name of the object being looked up.
        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
        The full pathname first, then each successively shorter ancestor path.
    """
    parts = pathname.split(delim)
    while parts:
        yield delim.join(parts)
        # drop the last component to move up one level
        parts = parts[:-1]
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    if pattern == '*':
        # fast path: the original list object is returned, not a copy
        return var_list
    if pattern in var_list:
        # exact-name match takes precedence over glob matching
        return [pattern]
    return [n for n in var_list if fnmatchcase(n, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    # ljust pads with trailing spaces up to pad_num total width and leaves
    # longer strings untouched, which matches the original manual padding.
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.
    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.
    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    # redirect stdout to an in-memory buffer for the duration of the run
    stdout = sys.stdout
    strout = StringIO()
    sys.stdout = strout
    try:
        prob.run_model()
    except Exception as err:
        if not ignore_exception:
            raise err
    finally:
        # always restore the real stdout, even on error
        sys.stdout = stdout
    return strout.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.
    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    # redirect stdout to an in-memory buffer for the duration of the run
    stdout = sys.stdout
    strout = StringIO()
    sys.stdout = strout
    try:
        failed = prob.run_driver()
    finally:
        # always restore the real stdout, even on error
        sys.stdout = stdout
    return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # silently drop keyword args this numpy version doesn't recognize
    # (e.g. numpy <= 1.13 has no 'floatmode' option)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    """Generator that yields a single None; backs do_nothing_context()."""
    yield None
def do_nothing_context():
    """
    Do nothing.
    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.
    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # a fresh context manager instance is created on every call
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if not left and not right:
        # strip ALL whitespace, including interior runs
        return re.sub(r"\s+", "", s, flags=re.UNICODE)
    if left and right:
        return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
    if right:
        return re.sub(r"\s+$", "", s, flags=re.UNICODE)
    return re.sub(r"^\s+", "", s, flags=re.UNICODE)
# Characters that are illegal in Python identifiers; each one maps to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Each punctuation character in the translation table is replaced by an
    underscore; all other characters pass through unchanged.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
# Containers that are converted element-wise (to lists) during serialization.
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # bool is a subclass of int, so native bools pass through here unchanged
        # (they are natively JSON serializable anyway).
        return o
    elif isinstance(o, complex):
        # BUGFIX/cleanup: the original also tested `isinstance(o, bool)` here, but
        # that branch was unreachable because bools are caught by the int check above.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert an object into something usable as a JSON dictionary key.

    This is for making serializable dictionary keys, so no containers are handled.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # custom objects are represented by their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # bool is a subclass of int, so native bools pass through here unchanged.
        return o
    elif isinstance(o, complex):
        # BUGFIX/cleanup: the original also tested `isinstance(o, bool)` here, but
        # that branch was unreachable because bools are caught by the int check above.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every entry before returning anything
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check whether a variable name passes through the includes and excludes filters.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Any matching exclude pattern rejects the name outright.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    # With no include patterns, everything that survived the excludes passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check whether either variable name passes through the includes and excludes filters.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name when it actually differs from the absolute name.
    names = (name, prom_name) if name != prom_name else (name,)

    def _hit(pattern):
        return any(fnmatchcase(n, pattern) for n in names)

    if excludes is not None and any(_hit(pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(_hit(pat) for pat in includes)
# Environment-variable values (compared case-insensitively) considered 'false'.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    An unset variable is treated the same as '0', i.e. not truthy.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    split_names = [p.split('.') for p in pathnames]

    # walk the name parts in lockstep, collecting them while all paths agree;
    # zip stops at the shortest path automatically
    common = []
    for position in zip(*split_names):
        if all(part == position[0] for part in position[1:]):
            common.append(position[0])
        else:
            break

    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # a plain slice can be turned directly into an arange, avoiding the
        # memory cost of materializing the full index array first
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    # general case (e.g. tuple of slices): index into the full shaped range
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the item iterator is equivalent to the original comprehension
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  (Currently unused in this function.)
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either level has no src_indices, the other level's indices apply unchanged.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # NOTE(review): my_src_inds appears to be an indexer object (has _flat_src,
    # flat(), and is callable) -- confirm against the project's indexer API.
    if my_src_inds._flat_src:
        # flat indexing: apply our flat indices to the parent's flattened index array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat: reshape parent's indices to our expected source shape, then index
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # scalar shapes become 1-tuples
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: original tested `tgt in abs2prom['input'][tgt]`, which is a substring
    # check against the promoted name string; the intended test is dict membership.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be an explicit
            # (manual) connection made in some group; find that group
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import os
        import sys
        saved_path = sys.path
        # temporarily extend the search path so wingdbstub can be found in WINGHOME
        sys.path = sys.path[:] + [os.environ['WINGHOME']]
        try:
            import wingdbstub
        finally:
            sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # look up the variable as an output first; fall back to inputs
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable within the local vector (0 when not wanted)
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            # local [start, end) sub-range within the full distributed variable
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                # absolute indices of this variable within its containing vector
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                # indices relative to the variable itself, i.e. range(var_size)
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # iterate over the FULL distributed size; non-local indices yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                # local index, translated into the local vector
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
simple_warning
|
Display a simple warning message without the annoying extra line showing the warning call.
Parameters
----------
msg : str
The warning message.
category : class
The warning class.
stacklevel : int
Number of levels up the stack to identify as the warning location.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Acts as a combined getter/setter for the module-level _ignore_errors flag.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # explicit err takes precedence; otherwise defer to the global flag
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    # save the current flag so it is restored even if the managed body raises
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        ignore_errors(save)
# MASKED: simple_warning function (lines 115-135)
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # FIX: use np.prod -- np.product is deprecated and removed in NumPy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    using_refs = ref0 is not None or ref is not None
    if using_refs:
        # Affine scaling cannot be used with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # the environment can override the requested optimizer (e.g. to force SLSQP)
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # a Mock result means pyoptsparse is partially stubbed out
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse not importable at all
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # branch order matters: array-likes first, then None, then scalar handling
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clamp infinities to the internal bound sentinel
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # membership always succeeds; useful where a real name set isn't needed
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # yield the full path, then progressively drop the last component
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-everything wildcard and exact-name match
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [var for var in var_list if fnmatchcase(var, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    # str.ljust pads to the total width, and returns the string unchanged when
    # it is already at least pad_num wide -- same behavior as manual padding.
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture  # redirect everything printed during the run
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        sys.stdout = saved_stdout
    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture  # redirect everything printed during the run
    try:
        failed = prob.run_driver()
    finally:
        sys.stdout = saved_stdout
    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved_opts = np.get_printoptions()
    # drop any keyword args not supported by this numpy version
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    supported = {key: val for key, val in kwds.items() if key in saved_opts}
    try:
        np.set_printoptions(*args, **supported)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved_opts)
def _nothing():
    # Trivial single-yield generator; wrapped by do_nothing_context below.
    yield None


def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # choose the pattern matching the requested trim mode
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # default: strip all white-space, including interior
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# characters that are illegal in python names, each mapped to '_' by _transtab
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # characters not listed in _badtab (e.g. spaces) pass through unchanged
    return s.translate(_transtab)
# container types whose members are converted recursively
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so True/False return here
        # unchanged and never reach the str(o) branch below -- confirm whether
        # stringifying bools was the intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        # effectively only complex reaches this branch (see note above)
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # object has no to_json(); fall back to its class name
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        # unbox numpy scalars to native python numbers
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary objects are represented by their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so True/False return here
        # unchanged and never reach the str(o) branch below -- confirm intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        # effectively only complex reaches this branch (see note above)
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # last resort: never raise, just label the value as unserializable
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # None, '' and other falsey inputs yield an empty set
    if not str_data:
        return set()

    if isinstance(str_data, str):
        return {str_data}

    if isinstance(str_data, (set, list)):
        # validate each member before converting
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # exclusion always wins over inclusion
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # no includes given means everything not excluded passes
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name  # only test the promoted name when it differs

    def _hits(pattern):
        # True if either spelling of the variable matches the glob pattern
        return fnmatchcase(name, pattern) or \
            (check_prom and fnmatchcase(prom_name, pattern))

    # exclusion always wins over inclusion
    if excludes is not None and any(_hits(pat) for pat in excludes):
        return False

    if includes is None:
        return True

    return any(_hits(pat) for pat in includes)
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # unset variables default to '0', i.e. falsey
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    split_names = [p.split('.') for p in pathnames]

    # walk the name parts in lockstep, collecting them while all paths agree;
    # zip stops at the shortest path automatically
    common = []
    for position in zip(*split_names):
        if all(part == position[0] for part in position[1:]):
            common.append(position[0])
        else:
            break

    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # a plain slice can be turned directly into an arange, avoiding the
        # memory cost of materializing the full index array first
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    # general case (e.g. tuple of slices): index into the full shaped range
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the item iterator is equivalent to the original comprehension
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  (Currently unused in this function.)
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either level has no src_indices, the other level's indices apply unchanged.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # NOTE(review): my_src_inds appears to be an indexer object (has _flat_src,
    # flat(), and is callable) -- confirm against the project's indexer API.
    if my_src_inds._flat_src:
        # flat indexing: apply our flat indices to the parent's flattened index array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat: reshape parent's indices to our expected source shape, then index
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # scalar shapes become 1-tuples
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Bug fix: this previously tested `tgt in abs2prom['input'][tgt]`, which did a
    # substring check of tgt against its own promoted name (a str) and could raise
    # KeyError for an unknown tgt.  The intent is membership in the input mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # where the manual connection was declared.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if not env_truthy('WING_DBG'):
        return

    import os
    import sys

    saved_path = sys.path
    sys.path = sys.path[:] + [os.environ['WINGHOME']]
    try:
        import wingdbstub
    finally:
        # Always restore sys.path, even if the debugger stub fails to import.
        sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Determine whether vname is an output or an input and grab the matching
        # per-rank size table and vector slice dict.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # Distributed variable: compute this rank's [start, end) range within the
            # full distributed variable, plus the offset into the local vector.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # Serial variable: local indices form a contiguous range.
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # Iterate over the FULL distributed size; indices owned by other ranks
        # yield None so every rank performs the same number of iterations.
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')

    # Temporarily swap in the minimal formatter, restoring the original even on error.
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
| 115
| 135
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # Pure query; leave the module-level flag untouched.
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Warn when explicitly told to (err is False) or when errors are globally ignored.
    if err is False or (err is None and ignore_errors()):
        issue_warning(msg, category=category)
        return
    raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Restore the prior setting even if the body raised.
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')

    # Temporarily swap in the minimal formatter, restoring the original even on error.
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated and removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is None and ref is None:
        # No affine scaling requested; fill in neutral defaults.
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder
    else:
        # Affine scaling cannot be used with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None

    # The environment variable overrides the requested optimizer (e.g. force SLSQP on CI).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    # Mock is used below to detect stubbed-out pyoptsparse installs.
    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer is unavailable; optionally fall back to SLSQP.
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # A Mock instance means pyoptsparse is present but this optimizer is
            # stubbed out; treat that as "unavailable" and try SLSQP instead.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable.
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # Clip unbounded scalars to the internal "infinite bound" sentinel.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, item):
        """
        Report membership for any item.

        Parameters
        ----------
        item : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # Yield the full path first, then peel one trailing component per iteration.
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # '*' matches everything; skip the per-name scan entirely.
    if pattern == '*':
        return var_list
    # An exact name match short-circuits glob matching.
    if pattern in var_list:
        return [pattern]
    return [name for name in var_list if fnmatchcase(name, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    base = "'{0}'".format(name) if quotes else '{0}'.format(name)
    # Only pad when the (possibly quoted) name is shorter than the field width;
    # longer names are returned unchanged rather than truncated.
    if len(base) < pad_num:
        return base.ljust(pad_num)
    return base
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        prob.run_model()
    except Exception as err:
        if not ignore_exception:
            raise err
    finally:
        # Restore stdout no matter how run_model exits.
        sys.stdout = saved_stdout

    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # Restore stdout even if the driver raises.
        sys.stdout = saved_stdout

    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved_opts = np.get_printoptions()

    # ignore any keyword args that are not valid in this version of numpy
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    valid_kwds = {key: val for key, val in kwds.items() if key in saved_opts}

    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved_opts)
def _nothing():
    # Trivial generator that yields a single None; wrapped by do_nothing_context()
    # to build a no-op context manager.
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # Wrap the no-op generator and instantiate the resulting context manager.
    noop_cm = contextmanager(_nothing)
    return noop_cm()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Select the pattern once, then do a single substitution.
    if not left and not right:
        pattern = r"\s+"          # strip ALL whitespace, including interior runs
    elif right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    else:  # left
        pattern = r"^\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are invalid in Python identifiers; each maps to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # Single C-level pass via str.translate using the precomputed table.
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): make_serializable(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    if isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools are returned unchanged here and
        # never reach the str() conversion below (which in practice sees only complex).
        return o
    if isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    if hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serializable dictionary keys, so no containers.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # Class instances are keyed by their class name.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): default_noraise(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    if isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through here unchanged.
        return o
    if isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    if hasattr(o, '__dict__'):
        return o.__class__.__name__
    if o is None:
        return None
    return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # Falsy inputs (None, '', empty containers) all mean "no tags".
    if not str_data:
        return set()

    if isinstance(str_data, str):
        return {str_data}

    if isinstance(str_data, (set, list)):
        # Validate every element before returning anything.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins, so test those first.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # With no includes specified, everything not excluded passes.
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name separately when it differs from the absolute name.
    diff = name != prom_name

    def _hit(pattern):
        return fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))

    # An exclude match always wins.
    if excludes is not None and any(_hit(pat) for pat in excludes):
        return False

    if includes is None:
        return True

    return any(_hit(pat) for pat in includes)
# Lowercased values treated as "false"; anything else is truthy.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # Unset variables default to '0', i.e. falsey.
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    splits = [p.split('.') for p in pathnames]
    common = []
    # zip stops at the shortest path, so we never index past any split.
    for parts in zip(*splits):
        if all(part == parts[0] for part in parts[1:]):
            common.append(parts[0])
        else:
            break
    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    # General case (tuple of slices, ellipsis, ...): index the full range.
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() consumes the (name, meta) pairs produced by the item iterator directly.
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either level has no indices, the other level's indices apply unchanged.
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    # NOTE(review): assumes my_src_inds is an Indexer-like object exposing
    # _flat_src, flat(), and __call__ -- confirm against the openmdao indexer API.
    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]

    shaped_parent = parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)
    return shaped_parent[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Bug fix: this previously tested `tgt in abs2prom['input'][tgt]`, which did a
    # substring check of tgt against its own promoted name (a str) and could raise
    # KeyError for an unknown tgt.  The intent is membership in the input mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # where the manual connection was declared.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if not env_truthy('WING_DBG'):
        return

    import os
    import sys

    saved_path = sys.path
    sys.path = sys.path[:] + [os.environ['WINGHOME']]
    try:
        import wingdbstub
    finally:
        # Always restore sys.path, even if the debugger stub fails to import.
        sys.path = saved_path
class LocalRangeIterable(object):
"""
Iterable object yielding local indices while iterating over local or distributed vars.
The number of iterations for a distributed variable will be the full distributed size of the
variable but None will be returned for any indices that are not local to the given rank.
Parameters
----------
system : System
Containing System.
vname : str
Name of the variable.
use_vec_offset : bool
If True, return indices for the given variable within its vector, else just return
indices within the variable itself, i.e. range(var_size).
Attributes
----------
_inds : ndarray
Variable indices (unused for distributed variables).
_dist_size : int
Full size of distributed variable.
_start : int
Starting index of distributed variable on this rank.
_end : int
Last index + 1 of distributed variable on this rank.
_offset : int
Offset of this variable into the local vector,.
_iter : method
The iteration method used.
"""
def __init__(self, system, vname, use_vec_offset=True):
"""
Initialize the iterator.
"""
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if vname in abs2meta:
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = self._start + sizes[rank, var_idx]
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range(slices[vname].stop - slices[vname].start)
def _serial_iter(self):
"""
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
"""
yield from self._inds
def _dist_iter(self):
    """
    Iterate over a distributed variable.

    Yields
    ------
    int or None
        Local vector index, or None if the index is not local to this rank.
    """
    # translate a global distributed index into a local-vector index in one add
    shift = self._offset - self._start
    for gidx in range(self._dist_size):
        if self._start <= gidx < self._end:
            yield gidx + shift
        else:
            yield None
def __iter__(self):
    """
    Return an iterator.

    Returns
    -------
    iterator
        An iterator over our indices (serial or distributed, chosen in __init__).
    """
    return self._iter()
|
ensure_compatible
|
Make value compatible with the specified shape or the shape of indices.
Parameters
----------
name : str
The name of the value.
value : float or list or tuple or ndarray or Iterable
The value of a variable.
shape : int or tuple or list or None
The expected or desired shape of the value.
indices : Indexer or None
The indices into a source variable.
Returns
-------
ndarray
The value in a shape compatible with the specified shape and/or indices.
tuple
The resulting shape of the value.
Raises
------
ValueError
If value cannot be made to conform to shape or if shape and indices
are incompatible.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
    """
    Convert name of auto_ivc val to promoted input name.

    Parameters
    ----------
    conns_dict : dict
        Dictionary of global connections.
    name : str
        Name of auto_ivc to be found.

    Returns
    -------
    str
        Promoted input name, or None if no connection maps to `name`.
    """
    # reverse lookup: first key whose value is the given auto_ivc name
    return next((key for key, val in conns_dict.items() if val == name), None)
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # pure query: leave the module-level flag untouched
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # warn when errors are explicitly suppressed (err is False) or being globally ignored
    if err is False or (err is None and ignore_errors()):
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the prior setting, even if the body raised
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the terse formatter, restoring the original no matter what
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
# MASKED: ensure_compatible function (lines 138-208)
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    :code:`ref0` is the physical value when the scaled value is 0;
    :code:`ref` is the physical value when the scaled value is 1.
    """
    if ref0 is None and ref is None:
        # no affine scaling requested; fill in neutral defaults
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder
    else:
        # affine scaling cannot be combined with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance (None if unavailable).
    str
        Pyoptsparse optimizer string (None if unavailable).
    """
    OPT = None
    opt = None
    OPTIMIZER = None

    # the environment variable, when set, overrides the requested optimizer name
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT

        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer not buildable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # a Mock result means pyoptsparse itself is mocked (e.g. doc builds);
            # try SLSQP so a usable optimizer is reported if possible
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse is not installed at all
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # strings are Iterable but are never valid numeric input
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clamp the infinities to the internal bound constant
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that any name is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # peel off the last component each time through
    while parts:
        yield delim.join(parts)
        parts = parts[:-1]
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-all wildcard, then exact name, before glob matching
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [vname for vname in var_list if fnmatchcase(vname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    # ljust pads with trailing spaces to pad_num and is a no-op for longer strings,
    # which is exactly the original behavior
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception as err:
        # optionally swallow failures so partial output can still be returned
        if not ignore_exception:
            raise err
    finally:
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # always restore stdout, even if the driver raised
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0]) / 3)
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...     assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()

    # drop keyword args that this numpy version doesn't recognize
    # (e.g. numpy <=1.13 does not have the 'floatmode' option)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}

    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    """Yield None once; backing generator for do_nothing_context()."""
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # wrap the no-op generator in a fresh context manager each call
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # choose the pattern once, then do a single substitution
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # one C-level pass; every character in _badtab becomes '_'
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # NOTE: bool must be tested BEFORE (str, float, int) because bool is a subclass
    # of int; the original ordering made this branch unreachable for bools.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # custom objects are represented by their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every entry before converting
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # excludes take precedence over includes
    if excludes is not None and any(fnmatchcase(name, p) for p in excludes):
        return False

    if includes is None:
        return True
    return any(fnmatchcase(name, p) for p in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    diff = name != prom_name

    def _matches(pattern):
        # match the absolute name, and the promoted name only when it differs
        return fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))

    # excludes take precedence over includes
    if excludes is not None and any(_matches(p) for p in excludes):
        return False

    if includes is None:
        return True
    return any(_matches(p) for p in includes)
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # an unset variable is treated like '0'; comparison is case-insensitive
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if not pathnames:
        return ''

    splits = [p.split('.') for p in pathnames]

    # count leading components shared by every pathname; zip stops at the shortest
    nmatch = 0
    for column in zip(*splits):
        if all(comp == column[0] for comp in column):
            nmatch += 1
        else:
            break

    return '.'.join(splits[0][:nmatch])
def _is_slicer_op(indices):
    """
    Check if an indexer contains a slice or ellipsis operator.

    Parameters
    ----------
    indices : ndarray
        Indices to check.

    Returns
    -------
    bool
        Returns True if indices contains a colon or ellipsis operator.
    """
    if isinstance(indices, tuple):
        # any slice or Ellipsis entry makes the whole tuple a slicer
        for entry in indices:
            if isinstance(entry, slice) or entry is ...:
                return True
        return False

    return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # a plain slice lets us build only the requested range instead of
        # allocating and indexing a full arange
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
    """
    Yield keys from prom_dict with promoted input names converted to ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Yields
    ------
    str
        name
    """
    for name, meta in prom_dict.items():
        src = meta['ivc_source']
        # fall back to the promoted name when no ivc source exists
        yield name if src is None else src
def _prom2ivc_src_item_iter(prom_dict):
    """
    Yield items from prom_dict with promoted input names converted to ivc source names.

    The result is that all names are absolute.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Yields
    ------
    tuple
        name, metadata
    """
    for name, meta in prom_dict.items():
        src = meta['ivc_source']
        # fall back to the promoted name when no ivc source exists
        yield (name if src is None else src), meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the (name, meta) pairs is equivalent to the comprehension form
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # either side being None means the other side's indices apply unchanged
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit connection owner is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: was `tgt in abs2prom['input'][tgt]`, which tested tgt as a SUBSTRING of
    # its own promoted name instead of testing membership in the input mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # that declared it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        saved_path = sys.path
        # search WINGHOME in addition to the normal path, then restore
        sys.path = sys.path[:] + [os.environ['WINGHOME']]
        try:
            import wingdbstub
        finally:
            sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # determine whether vname is an output or an input, and grab the matching
        # size table and vector slice dict
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # distributed variable: record this rank's [start, end) window within
            # the full distributed size (see _dist_iter)
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # serial variable: precompute the full index range once
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # iterate the full distributed size; only this rank's window yields
        # a real (offset-adjusted) local index
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated and removed in NumPy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
| 138
| 208
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
    """
    Convert name of auto_ivc val to promoted input name.

    Parameters
    ----------
    conns_dict : dict
        Dictionary of global connections.
    name : str
        Name of auto_ivc to be found.

    Returns
    -------
    str
        Promoted input name, or None if no connection maps to `name`.
    """
    # reverse lookup: first key whose value is the given auto_ivc name
    return next((key for key, val in conns_dict.items() if val == name), None)
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # pure query: leave the module-level flag untouched
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # warn when errors are explicitly suppressed (err is False) or being globally ignored
    if err is False or (err is None and ignore_errors()):
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the prior setting, even if the body raised
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the terse formatter, restoring the original no matter what
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated in NumPy 1.25
            # and removed in NumPy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is None and ref is None:
        # no affine (ref-based) scaling requested; fall back to identity defaults
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder
    else:
        # affine scaling cannot be combined with explicit scaler/adder
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # convert ref/ref0 to the equivalent scaler/adder pair
        adder = -ref0
        scaler = 1.0 / (ref + adder)

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance (None if pyoptsparse is unavailable).
    str
        Pyoptsparse optimizer string (None if pyoptsparse is unavailable).
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # environment variable overrides the requested optimizer name
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer not available; optionally retry with SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # NOTE(review): a Mock here presumably means pyoptsparse was mocked
            # (e.g. by a docs build) rather than truly installed — confirm
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable; leave everything as None
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clamp infinities to the internal finite bound
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that reports every membership test as True.
    """

    def __contains__(self, name):
        """
        Report that the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always True, regardless of name.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Yield the given pathname and the pathnames of all of its parents.

    Pathnames are generated from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # build every prefix, then emit them longest-first
    prefixes = [delim.join(parts[:i + 1]) for i in range(len(parts))]
    yield from reversed(prefixes)
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # bare wildcard matches everything; return the list unchanged
    if pattern == '*':
        return var_list
    # an exact name present in the list short-circuits glob matching
    if pattern in var_list:
        return [pattern]
    return [name for name in var_list if fnmatchcase(name, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that it lines up when stacked with others.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The total number of characters the result should take up.
    quotes : bool
        If True, wrap the name in single quotes before padding.

    Returns
    -------
    str
        Padded string (unchanged if already at least pad_num wide).
    """
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    if len(text) < pad_num:
        # right-pad with spaces up to the requested width
        return text.ljust(pad_num)
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    stdout = sys.stdout
    strout = StringIO()

    sys.stdout = strout
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            # bare raise re-raises the active exception with its original
            # traceback intact (instead of `raise err`, which re-anchors it)
            raise
    finally:
        # always restore stdout, even on error
        sys.stdout = stdout

    return strout.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # restore stdout no matter what run_driver does
        sys.stdout = saved_stdout

    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0]) / 3)
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop any kwargs not supported by this numpy version
    # (e.g. numpy <= 1.13 has no 'floatmode' option)
    supported = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **supported)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
yield None
def do_nothing_context():
"""
Do nothing.
Useful when you have a block of code that only requires a context manager sometimes,
and you don't want to repeat the context managed block.
Returns
-------
contextmanager
A do nothing context manager.
"""
return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # default: strip all whitespace, including interior runs
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not valid in python identifiers; each is mapped to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans({badchar: '_' for badchar in _badtab})


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
# container types that are converted to plain lists during serialization
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): make_serializable(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    # NOTE: bool is a subclass of int, so bools are returned unchanged by the
    # check below and never reach the str() conversion; only complex does.
    if isinstance(o, (str, float, int)):
        return o
    if isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    if hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    return o
def make_serializable_key(o):
    """
    Convert a dictionary key to a native type for JSON serialization.

    This function is for making serializable dictionary keys, so no containers.
    It should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary objects are represented by their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): default_noraise(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    # NOTE: bool is a subclass of int, so bools pass through unchanged here
    # and never reach the str() conversion below; only complex does.
    if isinstance(o, (str, float, int)):
        return o
    if isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    if hasattr(o, '__dict__'):
        return o.__class__.__name__
    if o is None:
        return None
    return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # falsey input (None, '', empty container) yields an empty set
    if not str_data:
        return set()

    if isinstance(str_data, str):
        return {str_data}

    if isinstance(str_data, (set, list)):
        non_strings = [item for item in str_data if not isinstance(item, str)]
        if non_strings:
            typ = type(non_strings[0]).__name__
            msg = f"Items in tags should be of type string, but type '{typ}' was found."
            raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check whether a variable name passes through the includes and excludes filters.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    # exclusion takes precedence over inclusion
    if excludes is not None and any(fnmatchcase(name, p) for p in excludes):
        return False

    if includes is None:
        return True
    return any(fnmatchcase(name, p) for p in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check whether either variable name passes the includes and excludes filters.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    # only test the promoted name when it actually differs from the absolute one
    names = (name,) if name == prom_name else (name, prom_name)

    # exclusion takes precedence over inclusion
    if excludes is not None:
        for pattern in excludes:
            if any(fnmatchcase(n, pattern) for n in names):
                return False

    if includes is None:
        return True

    for pattern in includes:
        if any(fnmatchcase(n, pattern) for n in names):
            return True
    return False
# environment-variable values (compared case-insensitively) treated as 'false'
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # an unset variable defaults to '0', which is falsey
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        # only need to compare down to the depth of the shortest path
        minlen = np.min([len(s) for s in splits])
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            for i in range(1, npaths):
                if p0 != splits[i][common_loc]:
                    # mismatch at this depth; stop comparing
                    break
            else:
                # all paths agree at this depth; move on to the next component
                continue
            # inner loop broke on a mismatch, so stop the outer scan here;
            # common_loc is the index of the first NON-matching component
            break
        else:
            # outer loop ran to completion: every compared component matched,
            # so extend the common length past the last compared component
            common_loc += 1

        return '.'.join(splits[0][:common_loc])

    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        The sliced indices.
    """
    if not isinstance(slicer, slice):
        # general case (tuple of slices, etc.): index a full arange with the slicer
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]

    # simple slice: build the index range directly, using less memory
    start = 0 if slicer.start is None else slicer.start
    stop = arr_size if slicer.stop is None else slicer.stop
    step = 1 if slicer.step is None else slicer.step

    return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
        NOTE(review): this parameter is not used in the body — confirm intent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # when either set of indices is missing, the other passes through unchanged
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        # flat indexing: select from the flattened parent index array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat: reshape the parent's index array to this level's expected
        # source shape, then apply this level's fancy index
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple (None passes through unchanged).
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # scalar shape becomes a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no owning group is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # bugfix: membership must be tested against the input mapping itself;
    # the original `tgt in abs2prom['input'][tgt]` was a substring test
    # against the target's own promoted-name string.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # temporarily add WINGHOME to sys.path so wingdbstub can be found
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            # importing wingdbstub has the side effect of starting the Wing
            # IDE debugger connection
            import wingdbstub
        finally:
            # restore the original path even if the import fails
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # determine whether vname is an output or an input by checking
        # membership in the output metadata
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable within this rank's local vector
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            # global index range owned by this rank for the distributed var
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                # indices of the variable within its containing vector
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                # indices within the variable itself, i.e. range(var_size)
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # iterate over the FULL distributed size; only locally-owned global
        # indices map to a local index, the rest yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
determine_adder_scaler
|
Determine proper values of adder and scaler based on user arguments.
Adder and Scaler are used internally because the transformation is
slightly more efficient.
Parameters
----------
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
Value to multiply the model value to get the scaled value. Scaler
is second in precedence.
Returns
-------
tuple
Adder and scaler, properly formatted and based on ref/ref0 if provided.
Raises
------
ValueError
If both ref/ref0 and adder/scaler were provided.
Notes
-----
The response can be scaled using ref and ref0.
The argument :code:`ref0` represents the physical value when the scaled value is 0.
The argument :code:`ref` represents the physical value when the scaled value is 1.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Get or set the flag that disables certain setup-blocking errors.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        Exception class used to create the raised exception.
    category : warning class
        Class of the warning to be issued.
    err : bool
        If None, defer to ignore_errors(); otherwise err=True raises an
        exception and err=False issues a warning.
    """
    # Warn when explicitly requested (err is False) or when errors are
    # globally being ignored; otherwise raise.
    warn_instead = err is False or (err is None and ignore_errors())
    if warn_instead:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Temporarily set ignore_errors to the given flag within this context.

    Parameters
    ----------
    flag : bool
        Value that ignore_errors is set to inside the context.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the saved state, even if the body raises
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    .. deprecated:: use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in the simplified formatter so only the message is shown.
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        # restore the global formatter even if warnings are configured to raise
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated in NumPy 1.25
            # and removed in NumPy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance (None if pyoptsparse is unavailable).
    str
        Pyoptsparse optimizer string (None if pyoptsparse is unavailable).
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # environment variable overrides the requested optimizer name
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer not available; optionally retry with SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # NOTE(review): a Mock here presumably means pyoptsparse was mocked
            # (e.g. by a docs build) rather than truly installed — confirm
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable; leave everything as None
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # any non-string iterable is coerced to a float array
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clamp infinities to the finite bound used internally
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that any name is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # membership is unconditionally true; the argument is ignored
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    current = pathname
    yield current
    # repeatedly drop the last path component until no delimiter remains
    while delim in current:
        current = current.rpartition(delim)[0]
        yield current
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-all wildcard, or an exact name in the list
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]

    matches = []
    for varname in var_list:
        if fnmatchcase(varname, pattern):
            matches.append(varname)
    return matches
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    # ljust pads with trailing spaces to pad_num, and leaves strings that
    # are already wide enough untouched
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even if the model raised
        sys.stdout = saved_stdout

    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # always restore stdout, even if the driver raised
        sys.stdout = saved_stdout

    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
        set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # silently drop options this numpy version doesn't know about
    # (e.g. numpy <= 1.13 has no 'floatmode' option)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Trivial generator used by do_nothing_context() to build a no-op context
    # manager; yields a single None and ends.
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # Wrap the trivial _nothing generator so that each call produces a fresh
    # no-op context manager.
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # neither side specified: strip all whitespace, including interior
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not valid in a Python identifier; each one is mapped to
# '_' by str2valid_python_name() via this translation table.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # Map every character in _badtab to '_' in a single C-level pass.
    # NOTE(review): the table doesn't cover spaces, quotes or leading digits,
    # so some inputs may still not be valid identifiers -- confirm expected inputs.
    return s.translate(_transtab)
# Container types that are recursively converted element-by-element.
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        # lists, tuples and sets all become lists
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so a bool is returned here
        # unchanged, and the isinstance(o, bool) test below is unreachable for
        # it (only complex values reach that branch) -- confirm intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            # prefer an object's own JSON representation when it defines one
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary objects are represented by their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg.  It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        # lists, tuples and sets all become lists
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so bools are returned
        # unchanged here and the isinstance(o, bool) branch below can only
        # fire for complex values -- confirm intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # never raise; report the unconvertible type instead
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # falsy input (None, '', empty container) yields an empty set
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every element before returning anything
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # excludes take precedence: any exclude match rejects the name outright
    if excludes is not None and any(fnmatchcase(name, p) for p in excludes):
        return False

    # with no includes specified, everything not excluded passes
    if includes is None:
        return True
    return any(fnmatchcase(name, p) for p in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name

    def _hits(pattern):
        # match the absolute name, and the promoted name only when it differs
        # (avoids a redundant second fnmatch on identical names)
        return fnmatchcase(name, pattern) or \
            (check_prom and fnmatchcase(prom_name, pattern))

    # excludes take precedence over includes
    if excludes is not None and any(_hits(p) for p in excludes):
        return False

    if includes is None:
        return True
    return any(_hits(p) for p in includes)
# Lower-cased values regarded as 'false' for environment variables.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # an unset variable defaults to '0', i.e. not truthy
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        minlen = np.min([len(s) for s in splits])
        # walk name parts left to right until some path disagrees
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            for i in range(1, npaths):
                if p0 != splits[i][common_loc]:
                    # mismatch found at common_loc; stop scanning
                    break
            else:
                # all paths agree at this position; check the next one
                continue
            break
        else:
            # no mismatch within the first minlen parts, so the whole
            # shortest path is common
            common_loc += 1

        return '.'.join(splits[0][:common_loc])

    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory by materializing only the
        # requested range rather than the whole flat index array
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1

        # NOTE(review): the reshape to arr_shape assumes the slice spans the
        # full array (start=0, stop=arr_size, step=1) -- confirm with callers.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # multi-dim slicer: build the full shaped index array, then apply it
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the item iterator replaces the explicit comprehension
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # trivial cases: with only one set of indices there is nothing to compose
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # compose the two mappings: index the parent's materialized index array
    # with our own indices
    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # NOTE(review): parent_src_shape is unused here; the reshape uses
        # my_src_shape -- presumably the Indexer accounts for the parent
        # shape internally. Confirm against the Indexer API.
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # a bare number means a 1-D shape
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Fixed: previously this tested `tgt in abs2prom['input'][tgt]`, a
    # substring check against the promoted name (which also raised KeyError
    # for an unknown tgt) instead of dict membership.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit;
            # find the group whose manual connections declared it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # temporarily extend sys.path with WINGHOME so wingdbstub can be found
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            # importing wingdbstub starts the Wing IDE debugger as a side effect
            import wingdbstub
        finally:
            # always restore the original sys.path
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # determine whether vname is an output or an input and grab the
        # matching metadata, sizes, and vector slices
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable into this rank's local vector
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            # [start, end) is the portion of the distributed variable owned
            # by this rank
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # iterate the full distributed size, yielding None for entries
        # owned by other ranks
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    using_refs = ref0 is not None or ref is not None
    if using_refs:
        # affine scaling cannot be combined with explicit scaler/adder
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # convert ref/ref0 into the equivalent scaler/adder pair so the
        # bounds can be scaled uniformly
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    # acts as a combined getter/setter for the module-level flag
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # err explicitly False, or errors globally ignored -> warn instead of raise
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the previous global setting, even on error
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    # deprecated shim kept for backwards compatibility
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the terse formatter, then restore the original
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format

    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces the deprecated np.product (removed in numpy 2.0)
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    using_refs = ref0 is not None or ref is not None
    if using_refs:
        # affine scaling cannot be combined with explicit scaler/adder
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # convert ref/ref0 into the equivalent scaler/adder pair so the
        # bounds can be scaled uniformly
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # The env var overrides the requested optimizer (used to mimic CI machines
    # that lack SNOPT).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    # Mock is used to detect a pyoptsparse install whose optimizers have been
    # mocked out (e.g. in doc builds).
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # instantiation "succeeded" but may have produced a Mock object
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable; return (None, None)
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    # without fallback, failing to provide the exact optimizer skips the test
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # any non-string iterable is coerced to a float array
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clamp infinities to the finite bound used internally
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that any name is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # membership is unconditionally true; the argument is ignored
        return True
def all_ancestors(pathname, delim='.'):
    """
    Yield `pathname` and each of its ancestor pathnames, longest first.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    comps = pathname.split(delim)
    while comps:
        yield delim.join(comps)
        comps.pop()
def find_matches(pattern, var_list):
    """
    Return the variable names in `var_list` that match `pattern`.

    Parameters
    ----------
    pattern : str
        Glob pattern or exact variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    if pattern == '*':
        return var_list
    if pattern in var_list:
        # exact-name hit short-circuits the glob matching
        return [pattern]
    return [n for n in var_list if fnmatchcase(n, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Left-justify `name` (optionally quoted) in a field of `pad_num` characters.

    Names that already fill the field are returned without padding.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    if len(text) < pad_num:
        return text + ' ' * (pad_num - len(text))
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on the problem while capturing stdout.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved = sys.stdout
    sys.stdout = capture
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even on error
        sys.stdout = saved

    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on the problem while capturing stdout.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved = sys.stdout
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # always restore stdout, even if the driver raises
        sys.stdout = saved

    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager that temporarily applies numpy print options.

    Options not recognized by this numpy version are silently dropped.
    See `numpy.set_printoptions` for the full description of available options.

    Parameters
    ----------
    *args : list
        Variable-length argument list forwarded to `numpy.set_printoptions`.
    **kwds : dict
        Arbitrary keyword arguments, filtered to those numpy understands.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # ignore any keyword args that are not valid in this version of numpy
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    valid = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Single-shot generator yielding None; wrapped by do_nothing_context().
    yield None
def do_nothing_context():
    """
    Return a context manager that does nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    @contextmanager
    def _noop():
        yield None

    return _noop()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # neither side requested: strip all white-space
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters not allowed in python identifiers, and a translation table that
# maps each of them to '_' (used by str2valid_python_name).
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # characters that are invalid in python identifiers, each mapped to '_'
    bad = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
    return s.translate(str.maketrans(bad, '_' * len(bad)))
# Container types that make_serializable/default_noraise recurse into.
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, (list, tuple, set)):
        return [make_serializable(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): make_serializable(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    if isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so bools are returned
        # as-is here and the str(o) branch below never sees them — confirm
        # that is intended.
        return o
    if isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    if hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    return o
def make_serializable_key(o):
    """
    Convert an object to a native type usable as a JSON dictionary key.

    This function is for making serializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # fall back to the class name for arbitrary objects
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, (list, tuple, set)):
        return [default_noraise(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): default_noraise(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    if isinstance(o, (str, float, int)):
        return o
    if isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    if hasattr(o, '__dict__'):
        return o.__class__.__name__
    if o is None:
        return None
    # last resort: report the type instead of raising TypeError
    return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every entry before returning
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Return True if `name` passes the includes and excludes glob filters.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # excludes take precedence over includes
    if excludes is not None and any(fnmatchcase(name, p) for p in excludes):
        return False
    if includes is None:
        return True
    return any(fnmatchcase(name, p) for p in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Return True if either variable name passes the includes and excludes filters.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    def _hits(pattern):
        # the promoted name only needs checking when it differs from the abs name
        return fnmatchcase(name, pattern) or \
            (name != prom_name and fnmatchcase(prom_name, pattern))

    # excludes take precedence over includes
    if excludes is not None and any(_hits(p) for p in excludes):
        return False
    if includes is None:
        return True
    return any(_hits(p) for p in includes)
# Lower-cased environment variable values that env_truthy() treats as False.
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    An unset variable, or one set (case-insensitively) to '0', 'false', 'no',
    or the empty string, is considered falsey.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    return os.environ.get(env_var, '0').lower() not in ('0', 'false', 'no', '')
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        minlen = np.min([len(s) for s in splits])
        # walk component positions until some path disagrees with the first one
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            for i in range(1, npaths):
                if p0 != splits[i][common_loc]:
                    # mismatch: the common prefix ends before common_loc
                    break
            else:
                # all paths agree at this position; check the next one
                continue
            break
        else:
            # every position up to minlen matched, so include them all
            common_loc += 1
        return '.'.join(splits[0][:common_loc])
    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if not isinstance(slicer, slice):
        # general case: apply the slicer tuple to the full index array
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]

    # simple slice: build the index range directly, using less memory
    start = 0 if slicer.start is None else slicer.start
    stop = arr_size if slicer.stop is None else slicer.stop
    step = 1 if slicer.step is None else slicer.step
    return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # with indices at only one level, the other level's apply unchanged
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple (None passes through unchanged).

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit owning group was found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: membership must be tested against the input name mapping itself.
    # The original code used `tgt in abs2prom['input'][tgt]`, which performed a
    # substring test against the promoted-name string instead.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the
            # group whose manual connections declared it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os

        saved_path = sys.path
        # temporarily extend the path so wingdbstub can be found in WINGHOME
        sys.path = sys.path[:] + [os.environ['WINGHOME']]
        try:
            import wingdbstub
        finally:
            sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # look the variable up as an output first, falling back to inputs
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable within the local vector on this rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            # global start/end of this rank's portion of the distributed var
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            # total size across all ranks
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                # indices of the variable within the full local vector
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                # indices within the variable itself
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate the FULL distributed range; non-local indices yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
# set_pyoptsparse_opt
# -------------------
# For testing, sets the pyoptsparse optimizer using the given optimizer name.
# This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
# This can be used on systems that have SNOPT installed to force them to use
# SLSQP in order to mimic our test machines on travis and appveyor.
#
# Parameters
# ----------
# optname : str
#     Name of pyoptsparse optimizer that is requested by the test.
# fallback : bool
#     If True, fall back to SLSQP if optname can't be found.
#
# Returns
# -------
# object
#     Pyoptsparse optimizer instance.
# str
#     Pyoptsparse optimizer string.
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Get, and optionally set, the module-level _ignore_errors flag.

    Disabling errors lets setup proceed for visualization even when problems exist.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    warn = err is False or (err is None and ignore_errors())
    if warn:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag inside this context, restoring it afterward.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the prior flag even if the body raised
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    saved_format = warnings.formatwarning
    # temporarily install the simple formatter while emitting the warning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # BUG FIX: np.product was removed in NumPy 2.0; np.prod is the
            # equivalent supported spelling.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # affine scaling via ref/ref0 cannot be combined with scaler/adder
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0
        # convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # allow the environment to override the requested optimizer
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT

        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # pyoptsparse may hand back a Mock when the optimizer is missing
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Coerce an array-option value to a float or a float ndarray.

    Iterables of numbers become float ndarrays (optionally flattened),
    scalars become floats, None becomes `val_if_none`, and +/-inf are
    replaced by the framework's finite bound constants.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    if not isinstance(values, str) and isinstance(values, Iterable):
        arr = np.asarray(values, dtype=float)  # ValueError if non-numeric
        return arr.flatten() if flatten else arr
    if values is None:
        return val_if_none
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A dict-like stand-in whose membership test always succeeds.
    """

    def __contains__(self, name):
        """
        Report that `name` is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up (ignored).

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Yield `pathname` and each of its ancestor pathnames, longest first.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    comps = pathname.split(delim)
    while comps:
        yield delim.join(comps)
        comps.pop()
def find_matches(pattern, var_list):
    """
    Return the variable names in `var_list` that match `pattern`.

    Parameters
    ----------
    pattern : str
        Glob pattern or exact variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    if pattern == '*':
        return var_list
    if pattern in var_list:
        # exact-name hit short-circuits the glob matching
        return [pattern]
    return [n for n in var_list if fnmatchcase(n, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Left-justify `name` (optionally quoted) in a field of `pad_num` characters.

    Names that already fill the field are returned without padding.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    if len(text) < pad_num:
        return text + ' ' * (pad_num - len(text))
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on the problem while capturing stdout.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved = sys.stdout
    sys.stdout = capture
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even on error
        sys.stdout = saved

    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on the problem while capturing stdout.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved = sys.stdout
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # always restore stdout, even if the driver raises
        sys.stdout = saved

    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager that temporarily applies numpy print options.

    Options not recognized by this numpy version are silently dropped.
    See `numpy.set_printoptions` for the full description of available options.

    Parameters
    ----------
    *args : list
        Variable-length argument list forwarded to `numpy.set_printoptions`.
    **kwds : dict
        Arbitrary keyword arguments, filtered to those numpy understands.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # ignore any keyword args that are not valid in this version of numpy
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    valid = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Single-shot generator yielding None; wrapped by do_nothing_context().
    yield None
def do_nothing_context():
    """
    Return a context manager that does nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    @contextmanager
    def _noop():
        yield None

    return _noop()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # neither side requested: strip all white-space
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters not allowed in python identifiers, and a translation table that
# maps each of them to '_' (used by str2valid_python_name).
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
"""
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
Returns
-------
str
The valid python name string.
"""
return s.translate(_transtab)
# Container types whose items are converted recursively.
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so booleans are returned unchanged here.
        return o
    elif isinstance(o, complex):
        # The original code also tested isinstance(o, bool) here, but that check was
        # unreachable (bools are caught by the int test above); only complex values
        # ever reach this branch, and they are stringified.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # object has no to_json(); fall back to its class name
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    # Strings pass through untouched; numpy scalars become native scalars;
    # anything with an instance dict is represented by its class name; all
    # remaining objects fall back to their string form.
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, (list, tuple, set)):  # same types as module-level _container_classes
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so booleans are returned unchanged here.
        return o
    elif isinstance(o, complex):
        # The original code also tested isinstance(o, bool) here, but that check was
        # unreachable (bools are caught by the int test above); only complex values
        # ever reach this branch, and they are stringified.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # Empty/None input yields an empty set; a bare string becomes a singleton.
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every entry before returning anything
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Exclusion wins over inclusion, so the excludes are tested first.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    # With no includes given, everything not excluded passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name separately when it differs from the abs name.
    names = (name,) if name == prom_name else (name, prom_name)

    # Exclusion wins over inclusion, so the excludes are tested first.
    if excludes is not None:
        for pattern in excludes:
            if any(fnmatchcase(n, pattern) for n in names):
                return False

    if includes is None:
        return True
    for pattern in includes:
        if any(fnmatchcase(n, pattern) for n in names):
            return True
    return False
# Lower-cased values regarded as 'false' for environment variables.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable defaults to '0', i.e. falsey.
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if not pathnames:
        return ''
    if len(pathnames) == 1:
        return pathnames[0]

    common = []
    # zip stops at the shortest split path, so we never index past any name
    for parts in zip(*[p.split('.') for p in pathnames]):
        first = parts[0]
        if all(part == first for part in parts[1:]):
            common.append(first)
        else:
            break
    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        # fill in defaults for any omitted slice fields
        # NOTE(review): negative or reversed slice bounds are not normalized here
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # general case (e.g. tuple of slices/ellipsis): build the full index
        # array, reshape it to the array's shape, then apply the slicer to it
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # NOTE(review): parent_src_shape is accepted but never used in this function.
    # Either set of indices may be absent; then the other applies unchanged.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # my_src_inds is an Indexer-like object (has _flat_src, flat() and is
    # callable); compose it with the parent's indices, flat or shaped.
    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # a bare scalar becomes a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no owning group is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Bug fix: the original tested `tgt in abs2prom['input'][tgt]`, which indexes
    # the dict first (raising KeyError for unknown targets) and then does a
    # substring test on the promoted name.  The intended test is dict membership.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the
            # group where the manual connection was declared
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # temporarily extend sys.path with WINGHOME so wingdbstub can be found
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            # always restore the original sys.path, even if the import fails
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : range
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # determine whether vname is an output or an input of the system
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable within the local vector (0 when not wanted)
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            # [_start, _end) is the span of the full distributed index range
            # that is owned by this rank
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate the full distributed size; yield a local index only for the
        # span owned by this rank, None everywhere else
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
# (removed stray extraction artifact)
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # allow the environment to override the requested optimizer
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # a Mock result presumably means pyoptsparse itself is mocked
            # (e.g. doc builds) -- try SLSQP in that case; TODO confirm intent
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse is not installed at all
        pass
    if isinstance(opt, Mock):
        # the fallback was also mocked; report pyoptsparse as unavailable
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
# (removed stray extraction artifacts: "| 273", "| 333", "|")
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    # acts as both getter (flag=None) and setter for the module-level switch
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # err=False always warns; err=None defers to the module-level switch
    if err is False or (err is None and ignore_errors()):
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the previous setting even if the body raises
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the terse formatter, then always restore the original
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product here, which was deprecated in NumPy 1.25
            # and removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is None and ref is None:
        # no affine (ref/ref0) scaling requested; fill in scaler/adder defaults
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder
    else:
        # affine scaling cannot be combined with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
"""
For testing, sets the pyoptsparse optimizer using the given optimizer name.
This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
This can be used on systems that have SNOPT installed to force them to use
SLSQP in order to mimic our test machines on travis and appveyor.
Parameters
----------
optname : str
Name of pyoptsparse optimizer that is requested by the test.
fallback : bool
If True, fall back to SLSQP if optname can't be found.
Returns
-------
object
Pyoptsparse optimizer instance.
str
Pyoptsparse optimizer string.
"""
OPT = None
opt = None
OPTIMIZER = None
force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
if force:
optname = force
from unittest.mock import Mock
try:
from pyoptsparse import OPT
try:
opt = OPT(optname)
OPTIMIZER = optname
except Exception:
if fallback and optname != 'SLSQP':
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
else:
if fallback and isinstance(opt, Mock):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
except Exception:
pass
if isinstance(opt, Mock):
OPT = OPTIMIZER = None
if not fallback and OPTIMIZER != optname:
raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # infinite bounds are replaced by the internal finite sentinel value
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
        The pathname itself, then each successively shorter ancestor path.
    """
    parts = pathname.split(delim)
    # yield the full path first, then peel off one trailing component at a time
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: the universal glob, or an exact-name hit
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [vname for vname in var_list if fnmatchcase(vname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Optionally quote, then left-justify to the target width.  ljust returns
    # the string unchanged when it is already at least pad_num wide, matching
    # the "no padding" behavior for long names.
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    # redirect stdout into a buffer so the model's printed output can be returned
    stdout = sys.stdout
    strout = StringIO()
    sys.stdout = strout
    try:
        prob.run_model()
    except Exception as err:
        # re-raise unless the caller asked for exceptions to be swallowed
        if not ignore_exception:
            raise err
    finally:
        # always restore the real stdout, even on error
        sys.stdout = stdout
    return strout.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    # redirect stdout into a buffer so the driver's printed output can be returned
    stdout = sys.stdout
    strout = StringIO()
    sys.stdout = strout
    try:
        failed = prob.run_driver()
    finally:
        # always restore the real stdout, even if the driver raises
        sys.stdout = stdout
    return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.
    The `as`-clause of the `with`-statement gives the current print options.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    dict
        The active numpy print options inside the block.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop any keyword args not supported by this numpy version
    # (e.g. numpy <=1.13 does not have the 'floatmode' option)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        # restore the caller's print options no matter what happened
        np.set_printoptions(**saved)
def _nothing():
yield None
def do_nothing_context():
"""
Do nothing.
Useful when you have a block of code that only requires a context manager sometimes,
and you don't want to repeat the context managed block.
Returns
-------
contextmanager
A do nothing context manager.
"""
return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
"""
Remove white-space characters from the given string.
If neither right nor left is specified (the default),
then all white-space is removed.
Parameters
----------
s : str
The string to be modified.
right : bool
If True, remove white-space from the end of the string.
left : bool
If True, remove white-space from the beginning of the string.
Returns
-------
str
The string with white-space removed.
"""
if not left and not right:
return re.sub(r"\s+", "", s, flags=re.UNICODE)
elif right and left:
return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
elif right:
return re.sub(r"\s+$", "", s, flags=re.UNICODE)
else: # left
return re.sub(r"^\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
"""
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
Returns
-------
str
The valid python name string.
"""
return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
def make_serializable_key(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function is for making serizializable dictionary keys, so no containers.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate member types before converting
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # excludes take precedence over includes
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check whether either the absolute or promoted name passes the includes/excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # only match against the promoted name when it differs from the absolute one
    names = (name,) if name == prom_name else (name, prom_name)
    # excludes take precedence over includes
    if excludes is not None:
        for pattern in excludes:
            if any(fnmatchcase(n, pattern) for n in names):
                return False
    if includes is None:
        return True
    for pattern in includes:
        if any(fnmatchcase(n, pattern) for n in names):
            return True
    return False
# values (case-insensitive) that count as "false" for environment variables
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # an unset variable defaults to '0', i.e. falsey
    raw = os.environ.get(env_var, '0')
    return raw.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.
    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.
    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        # only need to compare up to the length of the shortest path
        minlen = np.min([len(s) for s in splits])
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            # inner for/else: 'continue' runs only if no path differed at this depth
            for i in range(1, npaths):
                if p0 != splits[i][common_loc]:
                    break
            else:
                continue
            # a mismatch was found at depth common_loc; stop scanning
            break
        else:
            # outer loop exhausted with no mismatch: all minlen components match,
            # so bump common_loc to include the final matching component
            common_loc += 1
        return '.'.join(splits[0][:common_loc])
    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # simple slice: generate only the requested range instead of
        # materializing the full index array first
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    # slice tuple / fancy index: build the full index array and apply the slicer
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.
    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  NOTE(review): unused in this function.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.
    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # either side missing means the other side's indices apply unchanged
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # my_src_inds / parent_src_inds appear to be OpenMDAO indexer objects
    # (they expose _flat_src, flat(), shaped_array(), __call__) rather than raw
    # ndarrays -- TODO confirm against the indexer API.
    if my_src_inds._flat_src:
        # index into the flattened parent source array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # reshape the parent's array to this level's expected source shape,
        # then apply this level's (possibly fancy) index
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple or None
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no owning group is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Bug fix: the original tested ``tgt in abs2prom['input'][tgt]``, which is a
    # substring check against the promoted-name string rather than a membership
    # test in the 'input' mapping itself.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit; find
            # the group where the manual connection was declared
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.
    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # temporarily extend sys.path with WINGHOME so wingdbstub can be found,
        # then restore the original path regardless of import success
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.
    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.
    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).
    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # determine whether vname is an output or an input of the system
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # distributed variable: compute this rank's ownership range within
            # the full distributed size
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # serial variable: iterate a simple contiguous range
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.
        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.
        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate the FULL distributed size; non-local indices yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.
        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
format_as_float_or_array (implementation omitted from this excerpt)

Format array option values.

Checks that the given array values are either None, float, or an iterable
of numeric values. On output all iterables of numeric values are
converted to a flat np.ndarray. If values is scalar, it is converted
to float.

Parameters
----------
name : str
    The path of the variable relative to the current system.
values : float or numpy ndarray or Iterable
    Values of the array option to be formatted to the expected form.
val_if_none : float or numpy ndarray
    The default value for the option if values is None.
flatten : bool
    Set to True to flatten any ndarray return.

Returns
-------
float or np.ndarray
    Values transformed to the expected form.

Raises
------
ValueError
    If values is Iterable but cannot be converted to a numpy ndarray.
TypeError
    If values is scalar, not None, and not a Number.
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.
    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.
    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    # acts as both getter (flag is None) and setter for the module-level flag
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.
    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # warn instead of raising when errors are being ignored globally, or when
    # the caller explicitly passed err=False
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.
    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.
    Yields
    ------
    None
    """
    # save the current setting and restore it on exit, even if the body raises
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.
    .. deprecated:: use openmdao.utils.om_warnings.issue_warning instead.
    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the simple formatter, restoring the global
    # warnings.formatwarning even if warn() raises
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)
    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape
    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated and removed in numpy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape
    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))
    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.
    Adder and Scaler are used internally because the transformation is
    slightly more efficient.
    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.
    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.
    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.
    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    The scaled value is computed as: scaled = (physical + adder) * scaler.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0
    # normalize to float or flat float ndarray
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.
    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.
    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.
    Returns
    -------
    object
        Pyoptsparse optimizer instance, or None if unavailable.
    str
        Pyoptsparse optimizer string, or None if unavailable.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # environment override (e.g. force SLSQP on machines that have SNOPT)
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # some environments stub out optimizers with Mock objects;
            # treat a Mock the same as "not available"
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        if flatten:
            values = values.flatten()
    elif not isinstance(values, str) and isinstance(values, Iterable):
        # np.asarray raises ValueError if entries are not numeric
        values = np.asarray(values, dtype=float)
        if flatten:
            values = values.flatten()
    elif values is None:
        values = val_if_none
    elif values == float('inf'):
        # clip infinite bounds to the finite INF_BOUND used internally
        values = INF_BOUND
    elif values == -float('inf'):
        values = -INF_BOUND
    elif isinstance(values, numbers.Number):
        values = float(values)
    else:
        raise TypeError('Expected values of {} to be an Iterable of '
                        'numeric values, or a scalar numeric value. '
                        'Got {} instead.'.format(name, values))
    return values
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.

    Useful as a stand-in wherever membership tests should always succeed.
    """

    def __contains__(self, name):
        """
        Report that the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
        Pathname of the object or one of its ancestors.
    """
    pieces = pathname.split(delim)
    while pieces:
        yield delim.join(pieces)
        pieces.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-all pattern, or an exact (non-glob) hit
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [vname for vname in var_list if fnmatchcase(vname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    # pad only when the (possibly quoted) text is shorter than the field width;
    # longer or equal-length text is returned unpadded
    if len(text) < pad_num:
        return text.ljust(pad_num)
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.
    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.
    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    # redirect stdout into a StringIO for the duration of the run; the
    # finally block guarantees stdout is restored even if run_model raises
    stdout = sys.stdout
    strout = StringIO()
    sys.stdout = strout
    try:
        prob.run_model()
    except Exception as err:
        # re-raise unless the caller asked to swallow exceptions
        if not ignore_exception:
            raise err
    finally:
        sys.stdout = stdout
    return strout.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.
    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    # redirect stdout into a StringIO for the duration of the run; the
    # finally block guarantees stdout is restored even if run_driver raises
    stdout = sys.stdout
    strout = StringIO()
    sys.stdout = strout
    try:
        failed = prob.run_driver()
    finally:
        sys.stdout = stdout
    return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description
    of available options. Any keyword arguments not recognized by this
    version of numpy are silently ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0]) / 3)
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...     assert opts == np.get_printoptions()

    Parameters
    ----------
    *args : list
        Positional arguments passed to `numpy.set_printoptions`.
    **kwds : dict
        Keyword arguments passed to `numpy.set_printoptions`.

    Yields
    ------
    dict
        The active numpy print options inside the block.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop keywords unknown to this numpy version (e.g. 'floatmode' pre-1.14)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # generator backing the no-op context manager; yields a single None
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    make_cm = contextmanager(_nothing)
    return make_cm()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
    if right:
        return re.sub(r"\s+$", "", s, flags=re.UNICODE)
    if left:
        return re.sub(r"^\s+", "", s, flags=re.UNICODE)
    # neither side specified: strip ALL whitespace, including interior
    return re.sub(r"\s+", "", s, flags=re.UNICODE)
# characters not legal in python identifiers, and a translation table mapping
# each of them to '_'
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.
    Each illegal character is replaced with an underscore; legal characters
    pass through unchanged.
    Parameters
    ----------
    s : str
        The string to be translated.
    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
def make_serializable_key(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function is for making serizializable dictionary keys, so no containers.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
"""
Construct a set containing the specified character strings.
Parameters
----------
str_data : None, str, or list of strs
Character string(s) to be included in the set.
name : str, optional
A name to be used in error messages.
Returns
-------
set
A set of character strings.
"""
if not str_data:
return set()
elif isinstance(str_data, str):
return {str_data}
elif isinstance(str_data, (set, list)):
for item in str_data:
if not isinstance(item, str):
typ = type(item).__name__
msg = f"Items in tags should be of type string, but type '{typ}' was found."
raise TypeError(msg)
if isinstance(str_data, set):
return str_data
elif isinstance(str_data, list):
return set(str_data)
elif name:
raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
else:
raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern):
return True
return False
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Unpromoted variable name to be checked for match.
prom_name : str
Promoted variable name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
to include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
diff = name != prom_name
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return True
return False
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
"""
Return True if the given environment variable is 'truthy'.
Parameters
----------
env_var : str
The name of the environment variable.
Returns
-------
bool
True if the specified environment variable is 'truthy'.
"""
return os.environ.get(env_var, '0').lower() not in _falsey
def common_subpath(pathnames):
"""
Return the common dotted subpath found in all of the given dotted pathnames.
Parameters
----------
pathnames : iter of str
Dotted pathnames of systems.
Returns
-------
str
Common dotted subpath. Returns '' if no common subpath is found.
"""
if len(pathnames) == 1:
return pathnames[0]
if pathnames:
npaths = len(pathnames)
splits = [p.split('.') for p in pathnames]
minlen = np.min([len(s) for s in splits])
for common_loc in range(minlen):
p0 = splits[0][common_loc]
for i in range(1, npaths):
if p0 != splits[i][common_loc]:
break
else:
continue
break
else:
common_loc += 1
return '.'.join(splits[0][:common_loc])
return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
"""
Return an index array based on a slice or slice tuple and the array size and shape.
Parameters
----------
slicer : slice or tuple containing slices
Slice object to slice array
arr_size : int
Size of output array
arr_shape : tuple
Tuple of output array shape
Returns
-------
array
Returns the sliced indices.
"""
if isinstance(slicer, slice):
# for a simple slice we can use less memory
start, stop, step = slicer.start, slicer.stop, slicer.step
if start is None:
start = 0
if stop is None:
stop = arr_size
if step is None:
step = 1
return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
else:
return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  NOTE(review): not used in the computation here.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # if either indexer is absent, the other is already the final answer
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        # flat local indices index directly into the flattened parent indices
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]

    parent_arr = parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)
    return parent_arr[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # a bare scalar becomes a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Fixed: previously this tested `tgt in abs2prom['input'][tgt]`, which performed a
    # substring check against the promoted-name string (and raised KeyError for unknown
    # targets) instead of checking membership in the promoted-name mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # that declared it via add/connect
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if not env_truthy('WING_DBG'):
        return

    import sys
    import os

    saved_path = sys.path
    # search WINGHOME last so normal imports keep their priority
    sys.path = sys.path[:] + [os.environ['WINGHOME']]
    try:
        import wingdbstub
    finally:
        sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.
    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.
    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).
    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # determine whether vname is an output or an input of this system and pick the
        # matching size table and vector slice dict
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # distributed variable: compute this rank's [start, end) span within the
            # full distributed size, plus the variable's offset into the local vector
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # serial variable: a simple range, optionally offset into the local vector
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.
        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.
        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate the full distributed size; non-local indices yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.
        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # strings are Iterable but are not valid numeric input
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # map +/- infinity onto the internal finite bound constants
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
| 336
| 389
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # pure query; leave the module-level flag untouched
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # warn when explicitly asked (err=False) or when errors are globally ignored
    warn_instead = err is False or (err is None and ignore_errors())
    if warn_instead:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the prior setting, even if the body raised
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the simplified formatter, restoring the original afterwards
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which is deprecated and removed in NumPy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0: :code:`ref0` is the physical
    value when the scaled value is 0 and :code:`ref` is the physical value when
    the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # Affine scaling cannot be used with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.
    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.
    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.
    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # an env var can override the requested optimizer (e.g. force SLSQP on CI machines)
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    # some doc/test environments replace pyoptsparse with Mock objects; detect that below
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # instantiation "succeeded" but may have produced a Mock; fall back if so
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is unavailable; fall through with everything None
        pass
    if isinstance(opt, Mock):
        # a Mock optimizer means pyoptsparse isn't really usable
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # strings are Iterable but are not valid numeric input
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # map +/- infinity onto the internal finite bound constants
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """
    def __contains__(self, name):
        """
        Return if the named object is contained.
        Parameters
        ----------
        name : str
            Name of the object being looked up.
        Returns
        -------
        bool
            Always returns True.
        """
        # the lookup name is intentionally ignored; membership is unconditional
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    # accumulate prefixes shortest-to-longest, then yield in reverse order
    prefixes = []
    running = None
    for part in pathname.split(delim):
        running = part if running is None else running + delim + part
        prefixes.append(running)
    yield from reversed(prefixes)
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # wildcard-all returns the original list unchanged
    if pattern == '*':
        return var_list
    # an exact member short-circuits glob matching entirely
    if pattern in var_list:
        return [pattern]
    return [var_name for var_name in var_list if fnmatchcase(var_name, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    label = "'{0}'".format(name) if quotes else '{0}'.format(name)
    if len(label) < pad_num:
        # pad on the right out to the requested total width
        return label.ljust(pad_num)
    # too long (or exact fit): return unpadded
    return label
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception as err:
        # optionally swallow any failure; output captured so far is still returned
        if not ignore_exception:
            raise err
    finally:
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # restore stdout even if the driver raised
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop any kwargs this numpy version doesn't recognize
    # (e.g. 'floatmode' doesn't exist in numpy <= 1.13)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # generator that yields exactly once; wrapped by contextmanager in do_nothing_context
    yield None
def do_nothing_context():
    """
    Do nothing.
    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.
    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # wrap the trivial generator on each call so every caller gets a fresh context manager
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # select the pattern first, then apply a single substitution
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# characters that are invalid in python identifiers, each mapped to '_'
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.
    Parameters
    ----------
    s : str
        The string to be translated.
    Returns
    -------
    str
        The valid python name string.
    """
    # single C-level pass replacing each bad character with '_'
    return s.translate(_transtab)
# container types whose elements are serialized recursively
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # bool is a subclass of int, so booleans pass through here unchanged
        return o
    elif isinstance(o, complex):
        # Fixed: the original condition `isinstance(o, bool) or isinstance(o, complex)`
        # contained an unreachable bool check (bools are ints, caught above); only
        # complex values ever reach this branch.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # instances of user classes serialize as their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # bool is a subclass of int, so booleans pass through here unchanged
        return o
    elif isinstance(o, complex):
        # Fixed: the original condition `isinstance(o, bool) or isinstance(o, complex)`
        # contained an unreachable bool check (bools are ints, caught above); only
        # complex values ever reach this branch.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # falsy input (None, '', empty containers) maps to the empty set
    if not str_data:
        return set()

    if isinstance(str_data, str):
        return {str_data}

    if isinstance(str_data, (set, list)):
        # validate every entry before returning anything
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        # sets are returned as-is (same object); lists are converted
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # any exclude match rejects the name outright
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # with no include patterns, everything not excluded passes
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    def _hits(pattern):
        # match against the absolute name, and the promoted name only when it differs
        if fnmatchcase(name, pattern):
            return True
        return name != prom_name and fnmatchcase(prom_name, pattern)

    # any exclude match rejects the name outright
    if excludes is not None and any(_hits(pat) for pat in excludes):
        return False

    if includes is None:
        return True

    return any(_hits(pat) for pat in includes)
# string values (case-insensitive) that count as "false" for env_truthy
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # unset variables default to '0', i.e. falsey; comparison is case-insensitive
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if pathnames:
        name_lists = [p.split('.') for p in pathnames]
        shared = []
        # zip stops at the shortest pathname, so no explicit min-length computation needed
        for position in zip(*name_lists):
            first = position[0]
            if any(part != first for part in position[1:]):
                break
            shared.append(first)
        return '.'.join(shared)

    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # plain slice: fill in defaults and build only the selected range,
        # avoiding materializing the full index array
        begin = 0 if slicer.start is None else slicer.start
        finish = arr_size if slicer.stop is None else slicer.stop
        stride = 1 if slicer.step is None else slicer.step
        return np.arange(begin, finish, stride, dtype=INT_DTYPE).reshape(arr_shape)

    # general (tuple) slicer: index into the full reshaped index array
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  NOTE(review): not used in the computation here.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # if either indexer is absent, the other is already the final answer
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        # flat local indices index directly into the flattened parent indices
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]

    parent_arr = parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)
    return parent_arr[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # a bare scalar becomes a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Fixed: previously this tested `tgt in abs2prom['input'][tgt]`, which performed a
    # substring check against the promoted-name string (and raised KeyError for unknown
    # targets) instead of checking membership in the promoted-name mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # that declared it via add/connect
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # wingdbstub lives in the Wing IDE install dir (WINGHOME), so temporarily
        # extend sys.path for the duration of the import
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            # always restore the original sys.path, even if the import fails
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # the variable may be an output or an input; check outputs first
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable within the local vector on this rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            # [start, end) is the portion of the distributed var local to this rank
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # iterate the full distributed size; only locally-owned indices yield a value
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
find_matches
|
Return list of variable names that match given pattern.
Parameters
----------
pattern : str
Glob pattern or variable name.
var_list : list of str
List of variable names to search for pattern.
Returns
-------
list
Variable names that match pattern.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Acts as a combined getter/setter for the module-level _ignore_errors flag.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # warn (instead of raise) when err is explicitly False, or when err is unset
    # and errors are being globally ignored
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    # remember the current setting so it can be restored on exit
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the previous setting, even on exception
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; issue_warning from openmdao.utils.om_warnings should be used instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the terse formatter so the warning prints without the
    # source-line echo, then restore the original formatter
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated and removed in NumPy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined; if value is scalar assign it to array of shape,
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))
    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and scaler are used internally because the transformation is
    slightly more efficient.  The response can be scaled using ref and ref0:
    ref0 is the physical value that maps to a scaled value of 0, and ref is
    the physical value that maps to a scaled value of 1.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.
    """
    # Affine scaling via ref/ref0 cannot be combined with explicit scaler/adder.
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # env var overrides whatever optimizer the test asked for
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # a Mock result means pyoptsparse stubbed the optimizer out
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    # without fallback, not getting the requested optimizer skips the test
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clip unbounded scalars to the internal bound sentinel
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Unconditionally report that the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Yield the pathname of the starting object and the pathnames of all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    while parts:
        yield delim.join(parts)
        parts.pop()
# MASKED: find_matches function (lines 436-456)
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # quoting happens before padding so the quotes count toward the width
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    if len(text) < pad_num:
        return text.ljust(pad_num)
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even on exception
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # always restore stdout, even on exception
        sys.stdout = saved_stdout

    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. Any keyword options not recognized by this version of
    numpy are silently ignored.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    dict
        The active print options inside the block.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop any kwargs this numpy version doesn't support (e.g. 'floatmode' pre-1.14)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # single-shot generator backing the do-nothing context manager below
    yield None

def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # neither flag set: strip ALL whitespace, including interior
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# characters that are not legal in python identifiers, each mapped to '_'
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))

def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Each illegal character is replaced with an underscore.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        return o
    # NOTE(review): bool is a subclass of int, so bools are returned unchanged by the
    # branch above and never reach this branch; only complex values are stringified here.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            # prefer an object's own JSON representation if it provides one
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary objects collapse to their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        return o
    # NOTE(review): bool is a subclass of int, so bools return unchanged from the branch
    # above; only complex values are stringified here.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # last resort: describe rather than raise
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # every member must itself be a string
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable name passes through the includes and excludes filter.

    Excludes take precedence: a name matching any exclude pattern is rejected even
    if it also matches an include pattern.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if either form of a variable name passes the includes/excludes filter.

    Patterns are tested against the unpromoted name and, when it differs, against the
    promoted name as well.  Excludes take precedence over includes.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # avoid matching the promoted name twice when it equals the absolute name
    names = (name, prom_name) if name != prom_name else (name,)

    def _matches(pattern):
        return any(fnmatchcase(n, pattern) for n in names)

    if excludes is not None and any(_matches(pat) for pat in excludes):
        return False

    if includes is None:
        return True
    return any(_matches(pat) for pat in includes)
# values (after lowercasing) that are considered 'falsey' for environment variables
_falsey = {'0', 'false', 'no', ''}

def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # unset variables default to '0', i.e. falsey
    return os.environ.get(env_var, '0').lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        minlen = np.min([len(s) for s in splits])
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            # inner loop breaks on the first path that disagrees at this position
            for i in range(1, npaths):
                if p0 != splits[i][common_loc]:
                    break
            else:
                # all paths agree at this position; advance to the next one
                continue
            # a mismatch was found; common_loc is the count of matching components
            break
        else:
            # every position up to minlen matched, so include all of them
            common_loc += 1

        return '.'.join(splits[0][:common_loc])

    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # NOTE(review): reshaping the generated range to arr_shape assumes the slice
        # covers the full array; a partial simple slice would raise here — confirm
        # callers only pass full-coverage simple slices.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # general case: materialize the full index array, then apply the slicer
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
"""
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
"""
if parent_src_inds is None:
return my_src_inds
elif my_src_inds is None:
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
"""
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
"""
if isinstance(shape, Number):
return (shape,)
elif shape is None:
return shape
return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group pathname, promoted source name, promoted target name), or
        (None, None, None) if no owning group is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Fixed: original tested `tgt in abs2prom['input'][tgt]`, which performs a substring
    # check of the absolute path against the promoted name instead of testing that tgt
    # is a key of the promoted-name mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must have been made explicitly;
            # find the group whose manual connections mention this target
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
"""
Make import of wingdbstub contingent on value of WING_DBG environment variable.
Also will import wingdbstub from the WINGHOME directory.
"""
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = sys.path[:] + [os.environ['WINGHOME']]
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
class LocalRangeIterable(object):
"""
Iterable object yielding local indices while iterating over local or distributed vars.
The number of iterations for a distributed variable will be the full distributed size of the
variable but None will be returned for any indices that are not local to the given rank.
Parameters
----------
system : System
Containing System.
vname : str
Name of the variable.
use_vec_offset : bool
If True, return indices for the given variable within its vector, else just return
indices within the variable itself, i.e. range(var_size).
Attributes
----------
_inds : ndarray
Variable indices (unused for distributed variables).
_dist_size : int
Full size of distributed variable.
_start : int
Starting index of distributed variable on this rank.
_end : int
Last index + 1 of distributed variable on this rank.
_offset : int
Offset of this variable into the local vector,.
_iter : method
The iteration method used.
"""
def __init__(self, system, vname, use_vec_offset=True):
"""
Initialize the iterator.
"""
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if vname in abs2meta:
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = self._start + sizes[rank, var_idx]
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range(slices[vname].stop - slices[vname].start)
def _serial_iter(self):
"""
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
"""
yield from self._inds
def _dist_iter(self):
"""
Iterate over a distributed variable.
Yields
------
int or None
Variable index or None if index is not local to this rank.
"""
start = self._start
end = self._end
for i in range(self._dist_size):
if i >= start and i < end:
yield i - start + self._offset
else:
yield None
def __iter__(self):
"""
Return an iterator.
Returns
-------
iterator
An iterator over our indices.
"""
return self._iter()
|
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-all wildcard and exact-name lookup
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [varname for varname in var_list if fnmatchcase(varname, pattern)]
| 436
| 456
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # Pure read: report the current module-level flag.
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool or None
        If None, defer to ignore_errors(); otherwise err=True raises an
        exception and err=False issues a warning.
    """
    # err explicitly False, or the global ignore flag set, downgrades to a warning.
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        Value given to ignore_errors while inside the context.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Always restore the saved state, even if the body raised.
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    .. deprecated::
        Use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    saved_formatter = warnings.formatwarning
    # Temporarily swap in the terse formatter for just this warning.
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        # Restore the global formatter no matter what.
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod instead of np.product: the latter is a deprecated alias
            # that was removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # Affine (ref/ref0) scaling cannot be combined with scaler/adder.
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0
        # Convert ref/ref0 to the equivalent scaler/adder pair so the
        # bounds can be scaled uniformly downstream.
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance (None if pyoptsparse is unavailable).
    str
        Pyoptsparse optimizer string (None if pyoptsparse is unavailable).

    Raises
    ------
    unittest.SkipTest
        If fallback is False and the requested optimizer could not be provided.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # Environment override: CI machines can force a specific optimizer
    # regardless of what the test requested.
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer failed to instantiate; optionally fall back
            # to SLSQP (unless SLSQP was what already failed).
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Instantiation succeeded, but some pyoptsparse builds hand back a
            # Mock for missing optimizers; treat that like a failure as well.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not installed.
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr
    if values is None:
        return val_if_none
    # Clamp infinite scalar bounds to the internal finite sentinel.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up (ignored).

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
        Ancestor pathname, longest first.
    """
    parts = pathname.split(delim)
    # Drop the last component each round until nothing is left.
    while parts:
        yield delim.join(parts)
        parts = parts[:-1]
def find_matches(pattern, var_list):
    """
    Return the variable names in ``var_list`` matched by ``pattern``.

    Parameters
    ----------
    pattern : str
        Glob pattern or exact variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # '*' matches everything; an exact hit short-circuits the glob scan.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [vname for vname in var_list if fnmatchcase(vname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If True, wrap the name in single quotes.

    Returns
    -------
    str
        Padded string.
    """
    # Build the (possibly quoted) text first, then right-pad to pad_num.
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    if len(text) < pad_num:
        return text + ' ' * (pad_num - len(text))
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    str
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        # Swallow the error only when explicitly requested.
        if not ignore_exception:
            raise
    finally:
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    str
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # Restore stdout even if the driver raised.
        sys.stdout = saved_stdout

    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    Parameters
    ----------
    *args : list
        Positional arguments passed to `numpy.set_printoptions`.
    **kwds : dict
        Keyword arguments passed to `numpy.set_printoptions`; keys not
        recognized by this numpy version are dropped.

    Yields
    ------
    dict
        The currently active numpy print options.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved_opts = np.get_printoptions()
    # Filter out any keyword args this version of numpy doesn't understand
    # (e.g. 'floatmode' does not exist in numpy <= 1.13).
    valid_kwds = {key: val for key, val in kwds.items() if key in saved_opts}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved_opts)
def _nothing():
    """Yield None once; the generator behind ``do_nothing_context``."""
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    @contextmanager
    def _noop():
        # Single None yield: entering gives None, exiting does nothing.
        yield None

    return _noop()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Select the regex once, then do a single substitution.
    if not (left or right):
        pattern = r"\s+"            # strip all whitespace, including interior
    elif left and right:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    else:
        pattern = r"^\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters not allowed in python identifiers, and a translation table that
# maps each of them to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Each punctuation character in ``_badtab`` is replaced by an underscore;
    all other characters are left unchanged.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # Single C-level pass over the string using the precomputed table.
    return s.translate(_transtab)
# Container types that are serialized element-by-element (as JSON lists).
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so booleans pass through here
        # unchanged (JSON handles them natively). The former explicit
        # `isinstance(o, bool)` check below was unreachable dead code.
        return o
    elif isinstance(o, complex):
        # complex is not JSON serializable; fall back to its repr string.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # Object doesn't know how to serialize itself; use its class name.
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        # Unbox numpy scalars to the equivalent native python type.
        return o.item()
    if hasattr(o, '__dict__'):
        # Arbitrary objects are represented by their class name.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so booleans pass through here
        # unchanged (JSON handles them natively). The former explicit
        # `isinstance(o, bool)` check below was unreachable dead code.
        return o
    elif isinstance(o, complex):
        # complex is not JSON serializable; use its repr string.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # Last resort: report the type instead of raising TypeError.
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # Falsy input (None, '', empty container) yields an empty set.
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate that every member is a string before returning.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    # No include patterns means everything not excluded passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check if either the absolute or promoted name passes the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    def _hits(pattern):
        # Match against the absolute name; check the promoted name only when
        # it actually differs from the absolute one.
        if fnmatchcase(name, pattern):
            return True
        return name != prom_name and fnmatchcase(prom_name, pattern)

    # An exclude match always wins.
    if excludes is not None and any(_hits(pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(_hits(pat) for pat in includes)
# Lower-cased values that count as "false" for environment variables.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True unless the variable is unset or set (case-insensitively) to
        '0', 'false', 'no', or the empty string.
    """
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    split_paths = [p.split('.') for p in pathnames]
    shortest = min(len(sp) for sp in split_paths)
    first = split_paths[0]

    # Count how many leading components all paths share.
    ncommon = 0
    for depth in range(shortest):
        part = first[depth]
        if all(sp[depth] == part for sp in split_paths[1:]):
            ncommon += 1
        else:
            break

    return '.'.join(first[:ncommon])
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # Simple slice: generate only the selected indices directly,
        # avoiding materialization of the full index array.
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    # General case (e.g. tuple of slices): index into the full range.
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict keyed on ivc source pathnames.
    """
    # dict() consumes the (name, meta) pairs directly.
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either side is absent, the other one is already the final answer.
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        # Flat local indices select directly from the parent's flat array.
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]

    # Non-flat: reshape the parent's array to this level's expected shape first.
    return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple or None
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape was None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # A bare scalar becomes a 1-tuple.
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy.  It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit connection owner was found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Fixed: this was previously `tgt in abs2prom['input'][tgt]`, which did a
    # substring test against the promoted name instead of a membership test
    # in the input name map.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit;
            # find the lowest group where the manual connection was declared.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if not env_truthy('WING_DBG'):
        return

    import sys
    import os

    saved_path = sys.path
    sys.path = sys.path[:] + [os.environ['WINGHOME']]
    try:
        import wingdbstub
    finally:
        # Restore sys.path whether or not the import succeeded.
        sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : range
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Locate the variable among outputs first, falling back to inputs.
        all_meta = system._var_allprocs_abs2meta['output']
        if vname in all_meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            all_meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if all_meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            myrank = system.comm.rank

            self._offset = np.sum(sizes[myrank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:myrank, var_idx])
            self._end = self._start + sizes[myrank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            sl = slices[vname]
            if use_vec_offset:
                self._inds = range(sl.start, sl.stop)
            else:
                self._inds = range(sl.stop - sl.start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        for idx in self._inds:
            yield idx

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        lo, hi = self._start, self._end
        # i - start + offset == i + (offset - start); hoist the constant shift.
        shift = self._offset - lo
        for gidx in range(self._dist_size):
            yield gidx + shift if lo <= gidx < hi else None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
pad_name
|
Pad a string so that they all line up when stacked.
Parameters
----------
name : str
The string to pad.
pad_num : int
The number of total spaces the string should take up.
quotes : bool
If name should be quoted.
Returns
-------
str
Padded string.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # Pure read: report the current module-level flag.
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool or None
        If None, defer to ignore_errors(); otherwise err=True raises an
        exception and err=False issues a warning.
    """
    # err explicitly False, or the global ignore flag set, downgrades to a warning.
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        Value given to ignore_errors while inside the context.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Always restore the saved state, even if the body raised.
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    .. deprecated::
        Use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    saved_formatter = warnings.formatwarning
    # Temporarily swap in the terse formatter for just this warning.
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        # Restore the global formatter no matter what.
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod instead of np.product: the latter is a deprecated alias
            # that was removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # ref/ref0 and adder/scaler are two mutually exclusive ways of
        # specifying the same affine transformation.
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Express the ref/ref0 form as adder/scaler so bounds can be scaled.
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # The env var overrides the requested optimizer (e.g. to force SLSQP on CI).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer could not be constructed; optionally fall
            # back to SLSQP (unless SLSQP was the one that just failed).
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some pyoptsparse builds return a Mock for unavailable optimizers
            # instead of raising; treat that the same as a construction failure.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not installed; return (None, None) below.
        pass
    if isinstance(opt, Mock):
        # A Mock means no real optimizer was obtained.
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # Already an ndarray: optionally flatten, otherwise pass through untouched.
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # Any other non-string iterable is converted to a float ndarray.
    if not isinstance(values, str) and isinstance(values, Iterable):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # Clip unbounded scalars to the internal +/-INF_BOUND sentinels.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # Membership is unconditionally true, whatever the key.
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    # Yield the full path first, then drop one trailing component per step.
    parts = pathname.split(delim)
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # '*' matches everything; skip the per-name scan entirely.
    if pattern == '*':
        return var_list
    # An exact-name hit short-circuits the glob matching.
    if pattern in var_list:
        return [pattern]
    return list(filter(lambda vname: fnmatchcase(vname, pattern), var_list))
# MASKED: pad_name function (lines 459-491)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        prob.run_model()
    except Exception:
        # Swallow the error only when the caller explicitly asked us to.
        if not ignore_exception:
            raise
    finally:
        # Always restore stdout, even when the model run raised.
        sys.stdout = saved_stdout

    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # Restore stdout even if the driver raised.
        sys.stdout = saved_stdout

    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
        set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()

    # Silently drop options unknown to this numpy version
    # (e.g. numpy <= 1.13 has no 'floatmode' option).
    known = {key: val for key, val in kwds.items() if key in saved}

    try:
        np.set_printoptions(*args, **known)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Trivial generator yielding a single None; wrapped by contextmanager()
    # in do_nothing_context() to produce a no-op context manager.
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # Wrap the trivial generator on every call so each invocation returns a
    # fresh, single-use context manager instance.
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        # Strip both ends, leaving interior whitespace alone.
        return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
    if right:
        return re.sub(r"\s+$", "", s, flags=re.UNICODE)
    if left:
        return re.sub(r"^\s+", "", s, flags=re.UNICODE)
    # Neither side requested: remove every whitespace run, including interior.
    return re.sub(r"\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
# Map every character in _badtab to an underscore.
_transtab = str.maketrans(dict.fromkeys(_badtab, '_'))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # One C-level pass over the string via the precomputed table.
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools are returned unchanged here.
        return o
    elif isinstance(o, complex):
        # FIX: the former `isinstance(o, bool) or` on this branch was dead code
        # (bools never reach here because of the int check above); only complex
        # values actually need stringifying.
        return str(o)
    elif hasattr(o, '__dict__'):
        # Prefer an object's own JSON hook; fall back to its class name.
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        # Unbox numpy scalars to the matching native Python type.
        return o.item()
    if hasattr(o, '__dict__'):
        # Class instances are represented by their class name.
        return o.__class__.__name__
    # Everything else gets stringified.
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg.  It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through unchanged here.
        return o
    elif isinstance(o, complex):
        # FIX: the former `isinstance(o, bool) or` on this branch was dead code
        # (bools are caught by the int check above); only complex reaches here.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # Last resort: never raise from a json 'default' hook.
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # None, '', empty list/set, etc. all map to the empty set.
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate element types before converting.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering.  None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins over includes.
    if excludes is not None and any(fnmatchcase(name, pattern) for pattern in excludes):
        return False

    # No include patterns means everything (not excluded) passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, pattern) for pattern in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering.  None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name

    def _hits(pattern):
        # Match the absolute name, and the promoted name only when it differs.
        return fnmatchcase(name, pattern) or \
            (check_prom and fnmatchcase(prom_name, pattern))

    # Excludes take precedence over includes.
    if excludes is not None and any(_hits(pattern) for pattern in excludes):
        return False

    if includes is None:
        return True
    return any(_hits(pattern) for pattern in includes)
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable defaults to '0', which is falsey.
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath.  Returns '' if no common subpath is found.
    """
    # A single path is trivially its own common subpath.
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    splits = [p.split('.') for p in pathnames]

    # Count leading components shared by every path; zip stops at the
    # shortest path, so only comparable positions are examined.
    ncommon = 0
    for components in zip(*splits):
        first = components[0]
        if any(comp != first for comp in components[1:]):
            break
        ncommon += 1

    return '.'.join(splits[0][:ncommon])
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if not isinstance(slicer, slice):
        # General case (e.g. tuple of slices): build the full index array
        # and apply the slicer to it.
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]

    # Simple slice: generate only the requested indices, which uses less memory.
    start = 0 if slicer.start is None else slicer.start
    stop = arr_size if slicer.stop is None else slicer.stop
    step = 1 if slicer.step is None else slicer.step

    return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # The item iterator already yields (converted_name, meta) pairs.
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either level has no src_indices, the other level's indices (or None)
    # fully determine the mapping.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # NOTE(review): my_src_inds appears to be an Indexer (has _flat_src,
    # flat(), and is callable) -- confirm against openmdao.utils.indexer.
    # parent_src_shape is currently unused in this function.
    if my_src_inds._flat_src:
        # Flat case: index directly into the flattened parent index array.
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # Non-flat case: reshape the parent's index array to this level's
        # expected source shape, then apply this level's (possibly fancy) index.
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    # None passes through unchanged (meaning "shape unknown").
    if shape is None:
        return None
    # A bare number becomes a 1-tuple.
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy.  It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System.  The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # FIX: was `tgt in abs2prom['input'][tgt]`, which tested tgt as a
    # *substring* of its own promoted name (and raised KeyError for targets
    # missing from the mapping) instead of testing membership in the mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must have been declared
            # manually; find the group where that declaration lives.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    # Do nothing unless the WING_DBG env var is truthy.
    if not env_truthy('WING_DBG'):
        return

    import sys
    import os
    saved_path = sys.path
    # Temporarily extend sys.path with WINGHOME so wingdbstub can be found.
    sys.path = sys.path[:] + [os.environ['WINGHOME']]
    try:
        import wingdbstub
    finally:
        sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Determine whether vname is an output or an input, and grab the
        # matching metadata, size table, and vector slice dict.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # Distributed variable: compute this rank's [start, end) span
            # within the full distributed size, plus the offset of this
            # variable into the local vector (sum of sizes of preceding vars).
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # Serial variable: a simple contiguous range, optionally offset
            # by the variable's slice position within its vector.
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # Walk the full distributed range; only indices owned by this rank
        # yield a local index, all others yield None.
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Apply quoting first so the quotes count toward the field width.
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    if len(text) < pad_num:
        # Right-pad with spaces out to the requested width.
        return text.ljust(pad_num)
    # Already at least the requested width: return unchanged.
    return text
| 459
| 491
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    # None means "query only"; anything else also updates the global flag.
    if flag is None:
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Warn (instead of raising) when errors are globally ignored or when the
    # caller explicitly passed err=False; otherwise raise the given exception.
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    # Save the current global flag so it is restored even if the body raises.
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    # Deprecated shim kept for backwards compatibility; new code should call
    # openmdao.utils.om_warnings.issue_warning directly.
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in the simplified formatter, restoring the original
    # even if warnings.warn raises (e.g. because of an 'error' filter).
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # FIX: np.product is deprecated (and removed in numpy 2.0); np.prod
            # is the supported equivalent.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # ref/ref0 and adder/scaler are two mutually exclusive ways of
        # specifying the same affine transformation.
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Express the ref/ref0 form as adder/scaler so bounds can be scaled.
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # The env var overrides the requested optimizer (e.g. to force SLSQP on CI).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer could not be constructed; optionally fall
            # back to SLSQP (unless SLSQP was the one that just failed).
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some pyoptsparse builds return a Mock for unavailable optimizers
            # instead of raising; treat that the same as a construction failure.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not installed; return (None, None) below.
        pass
    if isinstance(opt, Mock):
        # A Mock means no real optimizer was obtained.
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # Already an ndarray: optionally flatten, otherwise pass through untouched.
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # Any other non-string iterable is converted to a float ndarray.
    if not isinstance(values, str) and isinstance(values, Iterable):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # Clip unbounded scalars to the internal +/-INF_BOUND sentinels.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that any named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # membership is unconditionally true, regardless of `name`
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    segments = pathname.split(delim)
    # yield the full path first, then peel one trailing segment per iteration
    while segments:
        yield delim.join(segments)
        segments.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-everything wildcard, or an exact name
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [varname for varname in var_list if fnmatchcase(varname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Fixed: the original bound a local `pad_name`, shadowing this function's
    # name, and re-implemented left-justification by hand across four branches.
    # str.ljust gives identical behavior: pad to pad_num total characters and
    # never truncate a name that is already that wide or wider.
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even when the exception propagates
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # restore stdout even if the driver raises
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description
    of available options. If any invalid options are specified, they will be
    ignored.

    Parameters
    ----------
    *args : list
        Positional arguments forwarded to `numpy.set_printoptions`.
    **kwds : dict
        Keyword arguments forwarded to `numpy.set_printoptions`; options not
        supported by the installed numpy version are silently dropped.

    Yields
    ------
    dict
        The currently active numpy print options.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop any kwargs this numpy version doesn't support
    # (e.g. numpy <= 1.13 has no 'floatmode' option)
    supported = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **supported)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    @contextmanager
    def _noop():
        # nothing to set up or tear down
        yield

    return _noop()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # choose the pattern once, then do a single substitution
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not valid in a Python identifier; each is mapped to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.
    Parameters
    ----------
    s : str
        The string to be translated.
    Returns
    -------
    str
        The valid python name string.
    """
    # single C-level pass via str.translate using the module-level table
    return s.translate(_transtab)
# Container types whose elements are serialized individually.
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        # note: tuples and sets come back as lists
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        # numpy scalar -> native python scalar
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so booleans are returned
        # unchanged here and the `isinstance(o, bool)` branch below is
        # unreachable -- confirm whether str(True) was actually intended.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        # duck-typed: prefer the object's own to_json() if it defines one
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        # numpy scalar -> native python scalar
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary objects are represented by their class name
        return o.__class__.__name__
    # everything else (ints, floats, tuples, ...) becomes its str() form
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.
    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        # note: tuples and sets come back as lists
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        # numpy scalar -> native python scalar
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so it is returned here and
        # the `isinstance(o, bool)` branch below never fires -- confirm intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        # arbitrary objects are represented by their class name
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # last resort: never raise, just flag the value as unserializable
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # empty/None input yields an empty set
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every entry is a string before converting
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # an exclude match always wins
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # no includes given means everything (not excluded) passes
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # only test the promoted name when it differs from the absolute name
    check_prom = name != prom_name

    def _matches(pattern):
        return fnmatchcase(name, pattern) or \
            (check_prom and fnmatchcase(prom_name, pattern))

    # an exclude match always wins
    if excludes is not None and any(_matches(pat) for pat in excludes):
        return False

    # no includes given means everything (not excluded) passes
    if includes is None:
        return True
    return any(_matches(pat) for pat in includes)
# Lower-cased environment-variable values that are treated as "false".
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.
    Parameters
    ----------
    env_var : str
        The name of the environment variable.
    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # unset variables default to '0' (False); comparison is case-insensitive
    return os.environ.get(env_var, '0').lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if not pathnames:
        return ''

    split_paths = [p.split('.') for p in pathnames]
    common = []
    # walk the name parts in lockstep; zip stops at the shortest path
    for parts in zip(*split_paths):
        if any(part != parts[0] for part in parts[1:]):
            break
        common.append(parts[0])
    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # plain slice: build only the requested range rather than
        # materializing and indexing the full index array (less memory)
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    # general case (tuples containing slices/ellipsis): index a full index array
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() consumes the (name, meta) pairs directly
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.
    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.
    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # NOTE(review): parent_src_shape is never used in this function -- confirm
    # whether it is kept only for call-site signature compatibility.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # my_src_inds appears to be an Indexer-like object exposing _flat_src,
    # flat() and __call__(); parent_src_inds exposes shaped_array().
    if my_src_inds._flat_src:
        # flat case: select from the parent's flattened index array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # shaped case: reshape parent indices to this level's source shape first
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    # None passes through unchanged
    if shape is None:
        return None
    # a bare number becomes a 1-tuple
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Fixed: previously tested ``tgt in abs2prom['input'][tgt]``, which did a
    # substring check against the promoted-name string rather than a membership
    # test on the input name->promoted-name dict.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit;
            # find the group whose manual connections declare it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        saved_path = sys.path
        # search WINGHOME as well, without mutating the original list
        sys.path = sys.path[:] + [os.environ['WINGHOME']]
        try:
            import wingdbstub
        finally:
            sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.
    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.
    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).
    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # determine whether vname is an output or an input of the system
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # distributed variable: compute this rank's slice of the full
            # distributed size from the per-rank size table
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # serial variable: iterate its (possibly vector-offset) index range
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.
        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.
        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate the FULL distributed size; non-local indices yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.
        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
printoptions
|
Context manager for setting numpy print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `numpy.set_printoptions` for the full description of
available options. If any invalid options are specified, they will be ignored.
>>> with printoptions(precision=2):
... print(np.array([2.0])) / 3
[0.67]
The `as`-clause of the `with`-statement gives the current print options:
>>> with printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
Parameters
----------
*args : list
Variable-length argument list.
**kwds : dict
Arbitrary keyword arguments.
Yields
------
str or int
See Also
--------
set_printoptions, get_printoptions
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.
    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.
    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    # combined getter/setter for the module-level _ignore_errors flag
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.
    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # note: `err is False` is an identity test -- only the literal False (or
    # err=None while errors are being ignored) selects the warning path; any
    # other value of err, including falsy ones like 0, raises.
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the prior setting even if the body raises
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.
    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    # deprecated shim: steer callers toward issue_warning
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    old_format = warnings.formatwarning
    # temporarily install the terse formatter; restored even if warn() raises
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # Fixed: np.product is deprecated (removed in numpy 2.0); np.prod
            # is the supported equivalent.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.
    Adder and Scaler are used internally because the transformation is
    slightly more efficient.
    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.
    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.
    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.
    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        # scaled = (physical + adder) * scaler maps ref0 -> 0 and ref -> 1
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0
    # normalize to float or flat float ndarray
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.
    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.
    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.
    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # the environment variable overrides whatever optimizer the test asked for
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        # any failure below (pyoptsparse missing entirely, or the requested
        # optimizer unavailable) leaves OPT/OPTIMIZER as None
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer not available; optionally retry with SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # NOTE(review): presumably some pyoptsparse builds return a Mock
            # for optimizers that aren't installed -- treated as "not found"
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    # without fallback, not getting exactly the requested optimizer skips the test
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # any non-string iterable is coerced to a float ndarray
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clip scalar infinities to the internal finite bound
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that any named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # membership is unconditionally true, regardless of `name`
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    segments = pathname.split(delim)
    # yield the full path first, then peel one trailing segment per iteration
    while segments:
        yield delim.join(segments)
        segments.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-everything wildcard, or an exact name
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [varname for varname in var_list if fnmatchcase(varname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Fixed: the original bound a local `pad_name`, shadowing this function's
    # name, and re-implemented left-justification by hand across four branches.
    # str.ljust gives identical behavior: pad to pad_num total characters and
    # never truncate a name that is already that wide or wider.
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even when the exception propagates
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    capture = StringIO()
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # Restore stdout no matter what the driver did.
        sys.stdout = saved_stdout
    return failed, capture.getvalue()
# MASKED: printoptions function (lines 553-594)
def _nothing():
    # Trivial generator yielding a single None; wrapped by contextmanager() in
    # do_nothing_context() below to produce a no-op context manager.
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # A fresh context manager instance is created per call (they are single-use).
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Build one regex for the requested trim mode, then apply it once.
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # default: strip ALL whitespace, including interior
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not legal inside a Python identifier.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
# Translation table mapping each illegal character to an underscore.
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Each character from `_badtab` is replaced with an underscore; all other
    characters (including whitespace) pass through unchanged.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
# Container types that are recursively converted to lists during serialization.
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        # Keys use the no-container key variant; values recurse normally.
        return dict(zip((make_serializable_key(k) for k in o.keys()),
                        (make_serializable(v) for v in o.values())))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools are returned unchanged here.
        return o
    elif isinstance(o, complex):
        # FIX: the original tested `isinstance(o, bool) or isinstance(o, complex)`,
        # but the bool half was unreachable (bools are caught by the int branch
        # above).  Only complex values ever reach this point; behavior preserved.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # object has no to_json(); fall back to its class name
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary object: represent the key by its class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        return dict(zip((make_serializable_key(k) for k in o.keys()),
                        (default_noraise(v) for v in o.values())))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through unchanged here.
        return o
    elif isinstance(o, complex):
        # FIX: the original tested `isinstance(o, bool) or isinstance(o, complex)`,
        # but the bool half was unreachable (caught by the int branch above).
        # Only complex values reach this point; behavior preserved.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # Falsy input (None, '', empty container) maps to the empty set.
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate every entry before returning anything.
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                raise TypeError(
                    f"Items in tags should be of type string, but type '{typ}' was found.")
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Excludes take precedence over includes.
    if excludes is not None:
        if any(fnmatchcase(name, pat) for pat in excludes):
            return False

    # No includes given means everything not excluded passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Only check the promoted name when it actually differs from the absolute name.
    check_prom = name != prom_name

    def _hits(pattern):
        # True if pattern matches either form of the name.
        if fnmatchcase(name, pattern):
            return True
        return check_prom and fnmatchcase(prom_name, pattern)

    # Excludes take precedence over includes.
    if excludes is not None and any(_hits(pat) for pat in excludes):
        return False

    if includes is None:
        return True
    return any(_hits(pat) for pat in includes)
# Case-insensitive values that count as "false" for environment variables.
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    An unset variable defaults to '0' and is therefore falsy.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if not pathnames:
        return ''

    splits = [p.split('.') for p in pathnames]
    # zip(*splits) truncates at the shortest path, so we only compare positions
    # that exist in every pathname.
    ncommon = 0
    for column in zip(*splits):
        if all(part == column[0] for part in column):
            ncommon += 1
        else:
            break
    return '.'.join(splits[0][:ncommon])
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if not isinstance(slicer, slice):
        # general (tuple/fancy) indexer: build the full arange and index into it
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]

    # simple slice: generate only the needed indices (less memory)
    start = 0 if slicer.start is None else slicer.start
    stop = arr_size if slicer.stop is None else slicer.stop
    step = 1 if slicer.step is None else slicer.step
    return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the item iterator is equivalent to the comprehension form.
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  NOTE(review): currently unused in the body.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either level has no indices, the other level's indices apply as-is.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # NOTE(review): my_src_inds looks like an Indexer-style object exposing
    # _flat_src, flat(), and __call__; parent_src_inds exposes shaped_array().
    # Confirm against the project's indexer class.
    if my_src_inds._flat_src:
        # flat indexing: index the flattened parent source array directly
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat: reshape the parent source to this level's shape, then apply indices
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # scalar shape becomes a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no owning group with a manual connection is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: the original tested `tgt in abs2prom['input'][tgt]`, which performs a
    # substring check on the promoted name instead of membership in the input mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit (manual connect)
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if not env_truthy('WING_DBG'):
        return

    import sys
    import os
    saved_path = sys.path
    # Temporarily extend sys.path with WINGHOME so wingdbstub can be found.
    sys.path = sys.path[:] + [os.environ['WINGHOME']]
    try:
        import wingdbstub
    finally:
        sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # Resolve vname against outputs first; if absent, fall back to input metadata.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # Distributed variable: compute this rank's [start, end) window within
            # the full distributed size and the variable's offset into the local vector.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # Serial variable: the whole index range can be precomputed directly.
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # Iterate the full distributed size so every rank sees the same number of
        # iterations; non-local positions yield None.
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0]) / 3)
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...     assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    opts = np.get_printoptions()
    # ignore any keyword args that are not valid in this version of numpy
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)
    try:
        np.set_printoptions(*args, **kw_opts)
        yield np.get_printoptions()
    finally:
        # always restore the caller's print options
        np.set_printoptions(**opts)
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # query only
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Warn when errors are being ignored (err None defers to the global flag)
    # or when the caller explicitly asked for a warning.
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the flag even if the body raised
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily install the compact formatter, then restore the original.
    saved_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # FIX: np.product is deprecated (removed in NumPy 2.0); use np.prod instead.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # Affine scaling cannot be used with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        # Convert ref/ref0 (defaulting to 1.0/0.0) to scaler/adder so the
        # bounds can be scaled with the same machinery.
        adder = -(0.0 if ref0 is None else ref0)
        scaler = 1.0 / ((1.0 if ref is None else ref) + adder)
    else:
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # Environment override: force a specific optimizer regardless of the request.
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some pyoptsparse installs return a Mock for missing optimizers
            # instead of raising; treat that the same as a failure.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr
    if values is None:
        return val_if_none
    # Clamp infinite bounds to the internal finite sentinel.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # Unconditional membership: `x in ContainsAll()` is always True.
        return True
def all_ancestors(pathname, delim='.'):
"""
Return a generator of pathnames of the starting object and all of its parents.
Pathnames are ordered from longest to shortest.
Parameters
----------
pathname : str
Pathname of starting object.
delim : str
Delimiter used to split the name.
Yields
------
str
"""
parts = pathname.split(delim)
for i in range(len(parts), 0, -1):
yield delim.join(parts[:i])
def find_matches(pattern, var_list):
"""
Return list of variable names that match given pattern.
Parameters
----------
pattern : str
Glob pattern or variable name.
var_list : list of str
List of variable names to search for pattern.
Returns
-------
list
Variable names that match pattern.
"""
if pattern == '*':
return var_list
elif pattern in var_list:
return [pattern]
return [name for name in var_list if fnmatchcase(name, pattern)]
def pad_name(name, pad_num=10, quotes=False):
"""
Pad a string so that they all line up when stacked.
Parameters
----------
name : str
The string to pad.
pad_num : int
The number of total spaces the string should take up.
quotes : bool
If name should be quoted.
Returns
-------
str
Padded string.
"""
l_name = len(name)
quotes_len = 2 if quotes else 0
if l_name + quotes_len < pad_num:
pad = pad_num - (l_name + quotes_len)
if quotes:
pad_str = "'{name}'{sep:<{pad}}"
else:
pad_str = "{name}{sep:<{pad}}"
pad_name = pad_str.format(name=name, sep='', pad=pad)
return pad_name
else:
if quotes:
return "'{0}'".format(name)
else:
return '{0}'.format(name)
def run_model(prob, ignore_exception=False):
"""
Call `run_model` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
ignore_exception : bool
Set to True to ignore an exception of any kind.
Returns
-------
string
Output from calling `run_model` on the Problem, captured from stdout.
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
prob.run_model()
except Exception as err:
if not ignore_exception:
raise err
finally:
sys.stdout = stdout
return strout.getvalue()
def run_driver(prob):
"""
Call `run_driver` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
Returns
-------
bool
Failure flag; True if failed to converge, False is successful.
string
Output from calling `run_driver` on the Problem, captured from stdout.
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
failed = prob.run_driver()
finally:
sys.stdout = stdout
return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
"""
Context manager for setting numpy print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `numpy.set_printoptions` for the full description of
available options. If any invalid options are specified, they will be ignored.
>>> with printoptions(precision=2):
... print(np.array([2.0])) / 3
[0.67]
The `as`-clause of the `with`-statement gives the current print options:
>>> with printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
Parameters
----------
*args : list
Variable-length argument list.
**kwds : dict
Arbitrary keyword arguments.
Yields
------
str or int
See Also
--------
set_printoptions, get_printoptions
"""
opts = np.get_printoptions()
# ignore any keyword args that are not valid in this version of numpy
# e.g. numpy <=1.13 does not have the 'floatmode' option
kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)
try:
np.set_printoptions(*args, **kw_opts)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
def _nothing():
yield None
def do_nothing_context():
"""
Do nothing.
Useful when you have a block of code that only requires a context manager sometimes,
and you don't want to repeat the context managed block.
Returns
-------
contextmanager
A do nothing context manager.
"""
return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
"""
Remove white-space characters from the given string.
If neither right nor left is specified (the default),
then all white-space is removed.
Parameters
----------
s : str
The string to be modified.
right : bool
If True, remove white-space from the end of the string.
left : bool
If True, remove white-space from the beginning of the string.
Returns
-------
str
The string with white-space removed.
"""
if not left and not right:
return re.sub(r"\s+", "", s, flags=re.UNICODE)
elif right and left:
return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
elif right:
return re.sub(r"\s+$", "", s, flags=re.UNICODE)
else: # left
return re.sub(r"^\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
"""
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
Returns
-------
str
The valid python name string.
"""
return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
def make_serializable_key(o):
    """
    Convert a dictionary key to a native type for JSON serialization.

    Unlike make_serializable, no containers are handled here, since this is
    intended only for dictionary keys.  This function should NOT be passed
    into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # fall back to the class name for arbitrary objects
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every entry before converting
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # excludes take precedence over includes
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    # with no includes given, everything not excluded passes
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if either variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name

    def _hit(pattern):
        # match against the absolute name, and the promoted name when it differs
        return fnmatchcase(name, pattern) or (check_prom and fnmatchcase(prom_name, pattern))

    # excludes take precedence over includes
    if excludes is not None and any(_hit(pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(_hit(pat) for pat in includes)
# Values (after lowercasing) that are treated as "false" for environment variables.
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # an unset variable defaults to '0', i.e. falsey
    return os.environ.get(env_var, '0').lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        minlen = np.min([len(s) for s in splits])
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            for i in range(1, npaths):
                if p0 != splits[i][common_loc]:
                    # mismatch at this depth: the common prefix ends before common_loc
                    break
            else:
                # all paths agree at this depth; check the next component
                continue
            # inner loop broke -> stop scanning; common_loc components are common
            break
        else:
            # the outer loop ran to completion: all minlen components matched,
            # so include the final one in the result
            common_loc += 1
        return '.'.join(splits[0][:common_loc])
    # empty input -> no common subpath
    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # INT_DTYPE is the project-wide integer dtype (imported from openmdao.core.constants)
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # general case (slice tuple / ellipsis): materialize the full index array
        # and apply the slicer to it
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() consumes the (name, meta) pairs directly
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # if either level has no indices, the other level's indices apply unchanged
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # NOTE(review): my_src_inds appears to be a project Indexer object (it has
    # _flat_src, flat(), and is callable) -- confirm against openmdao.utils.indexer.
    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # a bare scalar becomes a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUGFIX: the original tested `tgt in abs2prom['input'][tgt]`, which performs a
    # substring check against the promoted name (and raises KeyError when tgt is
    # absent); membership in the input mapping itself is what is intended.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the
            # group whose manual connections declared it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        save = sys.path
        # temporarily extend sys.path so wingdbstub can be found in WINGHOME
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub  # imported only for its side effect of starting the debugger
        finally:
            # always restore the original sys.path
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # determine whether vname is an output or an input of this system
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # distributed variable: compute this rank's slice of the full
            # distributed size from the per-rank sizes table
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # serial variable: a simple range, optionally offset into the vector
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # iterate over the FULL distributed size; non-local indices yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
match_includes_excludes
|
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    # acts as both getter (flag is None) and setter (flag given)
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # warn when explicitly requested (err=False) or when errors are being ignored
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    # remember the current setting so it can be restored on exit
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the previous setting even if the body raised
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    # deprecated shim kept for backward compatibility
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the simple formatter, then restore the original
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces the deprecated np.product alias (removed in NumPy 2.0)
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is None and ref is None:
        # no affine scaling requested; fill in neutral defaults
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder
    else:
        # affine scaling cannot be combined with scaler/adder
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0
        # convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # the env var overrides the requested optimizer (used to mimic CI machines)
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # some pyoptsparse installs hand back a Mock for missing optimizers
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # Convert adder to ndarray/float as necessary.
    # NOTE: the branch order matters -- arrays/iterables must be handled before
    # the `values == float('inf')` comparisons, which would be elementwise on arrays.
    if isinstance(values, np.ndarray):
        if flatten:
            values = values.flatten()
    elif not isinstance(values, str) \
            and isinstance(values, Iterable):
        values = np.asarray(values, dtype=float)
        if flatten:
            values = values.flatten()
    elif values is None:
        values = val_if_none
    elif values == float('inf'):
        # INF_BOUND is the project-wide finite stand-in for infinity
        values = INF_BOUND
    elif values == -float('inf'):
        values = -INF_BOUND
    elif isinstance(values, numbers.Number):
        values = float(values)
    else:
        raise TypeError('Expected values of {0} to be an Iterable of '
                        'numeric values, or a scalar numeric value. '
                        'Got {1} instead.'.format(name, values))
    return values
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    # Used where an "allow everything" container is needed in place of a real dict.

    def __contains__(self, name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # yield the full path first, then successively drop the last component
    while parts:
        yield delim.join(parts)
        parts = parts[:-1]
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: wildcard-for-everything and an exact-name hit
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return list(filter(lambda vname: fnmatchcase(vname, pattern), var_list))
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # The original assigned to a local named `pad_name`, shadowing this function;
    # str.ljust also replaces the manual format-string padding.  ljust is a no-op
    # when the string is already pad_num wide or wider, matching the original
    # behavior of returning the (quoted) name unpadded in that case.
    formatted = f"'{name}'" if quotes else f"{name}"
    return formatted.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    stdout = sys.stdout
    strout = StringIO()

    sys.stdout = strout
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            # bare `raise` (instead of `raise err`) preserves the original traceback
            raise
    finally:
        # always restore stdout, even on error
        sys.stdout = stdout

    return strout.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # restore stdout no matter what happened
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. Any keyword arguments that are not valid print options in
    the running numpy version are silently ignored.

    The `as`-clause of the `with`-statement gives the print options in effect
    inside the block.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop keyword args unknown to this numpy version (e.g. 'floatmode' pre-1.14)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        # restore the options that were in effect before the block
        np.set_printoptions(**saved)
def _nothing():
    # Generator that yields exactly once; wrapped by do_nothing_context() to
    # act as a no-op context manager.
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # wrap the single-yield _nothing generator as a fresh context manager instance
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # pick the pattern first, then do a single substitution
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # neither end singled out: strip all white-space everywhere
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not valid in a Python identifier, plus a translation
# table mapping each of them to an underscore.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # single C-level pass; every character in _badtab becomes '_'
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through unchanged here.
        # The original code also tested `isinstance(o, bool)` below, but that
        # branch was unreachable for this reason; the dead test was removed.
        return o
    elif isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert a dictionary key to a native type for JSON serialization.

    Unlike make_serializable, no containers are handled here, since this is
    intended only for dictionary keys.  This function should NOT be passed
    into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # fall back to the class name for arbitrary objects
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every entry before converting
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
# MASKED: match_includes_excludes function (lines 817-850)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if either variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name

    def _hit(pattern):
        # match against the absolute name, and the promoted name when it differs
        return fnmatchcase(name, pattern) or (check_prom and fnmatchcase(prom_name, pattern))

    # excludes take precedence over includes
    if excludes is not None and any(_hit(pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(_hit(pat) for pat in includes)
# Values (after lowercasing) that are treated as "false" for environment variables.
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # an unset variable defaults to '0', i.e. falsey
    return os.environ.get(env_var, '0').lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        # A single pathname is trivially its own common subpath.
        return pathnames[0]

    if not pathnames:
        return ''

    split_paths = [p.split('.') for p in pathnames]
    common = []
    # zip stops at the shortest pathname, so no explicit min-length handling is needed.
    for parts in zip(*split_paths):
        first = parts[0]
        if all(part == first for part in parts[1:]):
            common.append(first)
        else:
            break

    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        # Fill in slice defaults the way slice.indices() would for a non-negative range.
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # NOTE(review): this reshape assumes the slice selects prod(arr_shape) entries,
        # i.e. the slice spans the full array — confirm callers never pass partial slices here.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # General case (slice tuple / ellipsis): build the full index array in the
        # array's shape, then apply the slicer to it.
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either set of indices is missing, the other passes through unchanged.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # NOTE(review): parent_src_shape is not referenced below — presumably kept for
    # interface symmetry; confirm before removing.
    if my_src_inds._flat_src:
        # Flat indexing: select from the flattened parent index array.
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # Non-flat: reshape the parent's shaped index array to this level's expected
        # source shape, then apply this level's (fancy) index to it.
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        # None passes through unchanged.
        return None
    if isinstance(shape, Number):
        # Promote a scalar to a 1-tuple.
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Bug fix: membership must be tested against the input name->prom mapping itself.
    # The old code did `tgt in abs2prom['input'][tgt]`, which performed a substring
    # test against the promoted-name string (and could KeyError for unknown targets).
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # whose manual connections created it.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        save = sys.path
        # Temporarily extend sys.path so the debugger stub in WINGHOME is importable.
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub  # noqa: F401  (imported for its debugger side effect)
        finally:
            # Always restore the original sys.path, even if the import fails.
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Determine whether vname is an output or an input of 'system' and grab the
        # matching sizes/slices from the corresponding vector.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # Offset of this variable within this rank's local vector.
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            # [_start, _end) is the portion of the full distributed range local to this rank.
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        for i in range(self._dist_size):
            if i >= start and i < end:
                # Local index: shift into this rank's portion of the local vector.
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # With no includes given, everything not excluded passes.
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
# (stray extraction artifacts removed here: leftover metadata values "817" / "850")
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors

    if flag is None:
        # Pure query: leave the module-level flag untouched.
        return _ignore_errors

    _ignore_errors = flag
    return flag
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # err=False forces a warning, err=True forces a raise; err=None defers to the
    # global ignore_errors() flag (warn when errors are being ignored).
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    # Save the current global flag so it can be restored even if the body raises.
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Always restore the previous setting.
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in a format function that omits the source-line echo.
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        # Restore the global formatter even if warn() raised (e.g. due to error filters).
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod, not np.product: np.product was deprecated and removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        # Neither ref nor ref0 given: fall back to identity scaling defaults.
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0

    # Normalize to float or flat ndarray form.
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # Environment override, e.g. to force SLSQP on machines that have SNOPT.
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    # Mock is used to detect a pyoptsparse build where the requested optimizer
    # is stubbed out rather than actually available.
    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer not available; optionally fall back to SLSQP.
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Instantiation "succeeded" but may have produced a Mock stub.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable.
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # Already an ndarray: optionally flatten and return as-is.
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # Any non-string iterable is converted to a float ndarray.
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # Clamp infinities to the internal finite bound sentinel.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report whether the named object is contained (always yes).

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # Rejoin progressively shorter prefixes, dropping one trailing part each time.
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # Fast paths: match-all, or the pattern is itself one of the names.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return list(filter(lambda vname: fnmatchcase(vname, pattern), var_list))
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Quote first (if requested), then left-justify to the field width.
    # str.ljust returns the string unchanged when it is already long enough,
    # matching the old manual-format behavior. This also removes the local
    # variable that shadowed the function name itself.
    text = f"'{name}'" if quotes else f"{name}"
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    # Redirect stdout into a StringIO buffer for the duration of the run.
    stdout = sys.stdout
    strout = StringIO()

    sys.stdout = strout
    try:
        prob.run_model()
    except Exception as err:
        if not ignore_exception:
            raise err
    finally:
        # Always restore the real stdout, even when the run raises.
        sys.stdout = stdout

    return strout.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    # Redirect stdout into a StringIO buffer for the duration of the run.
    stdout = sys.stdout
    strout = StringIO()
    sys.stdout = strout
    try:
        failed = prob.run_driver()
    finally:
        # Always restore the real stdout, even when the driver raises.
        sys.stdout = stdout

    return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    # Snapshot the current options so they can be restored on exit.
    opts = np.get_printoptions()

    # ignore any keyword args that are not valid in this version of numpy
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)

    try:
        np.set_printoptions(*args, **kw_opts)
        yield np.get_printoptions()
    finally:
        # Restore the original options even if the with-body raised.
        np.set_printoptions(**opts)
def _nothing():
    # Single-yield generator used to build a no-op context manager.
    yield None


def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    ctx_factory = contextmanager(_nothing)
    return ctx_factory()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Choose the pattern for the requested trim mode; re.UNICODE ensures all
    # unicode whitespace is covered.
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not legal in a python identifier, each mapped to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans({char: '_' for char in _badtab})


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
# Container types that are recursively converted to lists for serialization.
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        # Keys are converted via the scalar-only key converter.
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so booleans are returned
        # unchanged here and never reach the bool branch below — confirm intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        # Effectively only reached for complex values (see note above).
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # Object has no to_json(); fall back to its class name.
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        # Unwrap numpy scalars to their native python equivalents.
        return o.item()
    if hasattr(o, '__dict__'):
        # Arbitrary objects are represented by their class name.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        # Keys are converted via the scalar-only key converter.
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so booleans are returned
        # unchanged here and never reach the bool branch below — confirm intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        # Effectively only reached for complex values (see note above).
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # Last resort: never raise from a json 'default' hook.
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
"""
Construct a set containing the specified character strings.
Parameters
----------
str_data : None, str, or list of strs
Character string(s) to be included in the set.
name : str, optional
A name to be used in error messages.
Returns
-------
set
A set of character strings.
"""
if not str_data:
return set()
elif isinstance(str_data, str):
return {str_data}
elif isinstance(str_data, (set, list)):
for item in str_data:
if not isinstance(item, str):
typ = type(item).__name__
msg = f"Items in tags should be of type string, but type '{typ}' was found."
raise TypeError(msg)
if isinstance(str_data, set):
return str_data
elif isinstance(str_data, list):
return set(str_data)
elif name:
raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
else:
raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern):
return True
return False
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Unpromoted variable name to be checked for match.
prom_name : str
Promoted variable name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
to include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
diff = name != prom_name
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return True
return False
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
"""
Return True if the given environment variable is 'truthy'.
Parameters
----------
env_var : str
The name of the environment variable.
Returns
-------
bool
True if the specified environment variable is 'truthy'.
"""
return os.environ.get(env_var, '0').lower() not in _falsey
def common_subpath(pathnames):
"""
Return the common dotted subpath found in all of the given dotted pathnames.
Parameters
----------
pathnames : iter of str
Dotted pathnames of systems.
Returns
-------
str
Common dotted subpath. Returns '' if no common subpath is found.
"""
if len(pathnames) == 1:
return pathnames[0]
if pathnames:
npaths = len(pathnames)
splits = [p.split('.') for p in pathnames]
minlen = np.min([len(s) for s in splits])
for common_loc in range(minlen):
p0 = splits[0][common_loc]
for i in range(1, npaths):
if p0 != splits[i][common_loc]:
break
else:
continue
break
else:
common_loc += 1
return '.'.join(splits[0][:common_loc])
return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
"""
Return an index array based on a slice or slice tuple and the array size and shape.
Parameters
----------
slicer : slice or tuple containing slices
Slice object to slice array
arr_size : int
Size of output array
arr_shape : tuple
Tuple of output array shape
Returns
-------
array
Returns the sliced indices.
"""
if isinstance(slicer, slice):
# for a simple slice we can use less memory
start, stop, step = slicer.start, slicer.stop, slicer.step
if start is None:
start = 0
if stop is None:
stop = arr_size
if step is None:
step = 1
return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
else:
return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
"""
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
"""
if parent_src_inds is None:
return my_src_inds
elif my_src_inds is None:
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
"""
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
"""
if isinstance(shape, Number):
return (shape,)
elif shape is None:
return shape
return tuple(shape)
def get_connection_owner(system, tgt):
"""
Return (owner, promoted_src, promoted_tgt) for the given connected target.
Note : this is not speedy. It's intended for use only in error messages.
Parameters
----------
system : System
Any System. The search always goes from the model level down.
tgt : str
Absolute pathname of the target variable.
Returns
-------
tuple
(wning group, promoted source name, promoted target name).
"""
from openmdao.core.group import Group
model = system._problem_meta['model_ref']()
src = model._conn_global_abs_in2out[tgt]
abs2prom = model._var_allprocs_abs2prom
if src in abs2prom['output'] and tgt in abs2prom['input'][tgt]:
if abs2prom['input'][tgt] != abs2prom['output'][src]:
# connection is explicit
for g in model.system_iter(include_self=True, recurse=True, typ=Group):
if g._manual_connections:
tprom = g._var_allprocs_abs2prom['input'][tgt]
if tprom in g._manual_connections:
return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom
return None, None, None
def wing_dbg():
"""
Make import of wingdbstub contingent on value of WING_DBG environment variable.
Also will import wingdbstub from the WINGHOME directory.
"""
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = sys.path[:] + [os.environ['WINGHOME']]
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
class LocalRangeIterable(object):
"""
Iterable object yielding local indices while iterating over local or distributed vars.
The number of iterations for a distributed variable will be the full distributed size of the
variable but None will be returned for any indices that are not local to the given rank.
Parameters
----------
system : System
Containing System.
vname : str
Name of the variable.
use_vec_offset : bool
If True, return indices for the given variable within its vector, else just return
indices within the variable itself, i.e. range(var_size).
Attributes
----------
_inds : ndarray
Variable indices (unused for distributed variables).
_dist_size : int
Full size of distributed variable.
_start : int
Starting index of distributed variable on this rank.
_end : int
Last index + 1 of distributed variable on this rank.
_offset : int
Offset of this variable into the local vector,.
_iter : method
The iteration method used.
"""
def __init__(self, system, vname, use_vec_offset=True):
"""
Initialize the iterator.
"""
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if vname in abs2meta:
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = self._start + sizes[rank, var_idx]
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range(slices[vname].stop - slices[vname].start)
def _serial_iter(self):
"""
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
"""
yield from self._inds
def _dist_iter(self):
"""
Iterate over a distributed variable.
Yields
------
int or None
Variable index or None if index is not local to this rank.
"""
start = self._start
end = self._end
for i in range(self._dist_size):
if i >= start and i < end:
yield i - start + self._offset
else:
yield None
def __iter__(self):
"""
Return an iterator.
Returns
-------
iterator
An iterator over our indices.
"""
return self._iter()
|
match_prom_or_abs
|
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Unpromoted variable name to be checked for match.
prom_name : str
Promoted variable name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
to include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Get or set the module-level flag that disables certain setup-aborting errors.

    Parameters
    ----------
    flag : bool or None
        If not None, the new value for the flag.

    Returns
    -------
    bool
        The (possibly just updated) value of the flag.
    """
    global _ignore_errors
    if flag is None:
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on `err` / the global flag.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        Exception class used when raising.
    category : warning class
        Warning class used when warning.
    err : bool or None
        If None, consult ignore_errors(); otherwise err=True raises and
        err=False warns.
    """
    # Warn only when explicitly asked (err is False) or when errors are being
    # globally ignored and the caller expressed no preference.
    should_warn = err is False or (err is None and ignore_errors())
    if not should_warn:
        raise exc(msg)
    issue_warning(msg, category=category)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Temporarily set the ignore_errors flag within a `with` block.

    Parameters
    ----------
    flag : bool
        Value to assign to ignore_errors while inside the context.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Always restore the prior setting, even if the body raised.
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in a minimal formatter so the emitted warning omits the
    # source-line echo, then restore the global formatter even if warn() raises.
    # NOTE(review): this mutates global warnings state, so it is not thread-safe.
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated in NumPy 1.25
            # and removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined; if value is scalar broadcast it to an array of
        # that shape, otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and scaler are used internally because the transformation is
    slightly more efficient.  `ref0` is the physical value that scales to 0
    and `ref` the physical value that scales to 1.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.
    """
    if ref0 is None and ref is None:
        # no affine scaling requested; fill in identity defaults
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0
    else:
        # the ref/ref0 form is mutually exclusive with scaler/adder
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0
        # convert ref/ref0 into the equivalent adder/scaler pair
        adder = -ref0
        scaler = 1.0 / (ref + adder)

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance (or None if unavailable).
    str
        Pyoptsparse optimizer string (or None if unavailable).
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # environment override lets CI force a specific optimizer regardless of
    # what the test requested
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # some pyoptsparse installs hand back a Mock for missing optimizers
            # instead of raising, so check for that case as well
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable; leave OPT/OPTIMIZER as None
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    # without fallback, not getting exactly what was asked for skips the test
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    None is replaced with `val_if_none`, iterables of numbers become float
    ndarrays (flattened on request), infinities map to +/-INF_BOUND, and
    scalars become floats.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if values is None:
        return val_if_none
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    # strings are Iterable too, so exclude them explicitly
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr
    # clamp infinities to the finite bound used internally
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    Dict-like stand-in whose membership test succeeds for every key.
    """

    def __contains__(self, name):
        """
        Report membership of `name`.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            True, unconditionally.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Yield the pathname of the starting object followed by all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
        Successively shorter ancestor pathnames.
    """
    parts = pathname.split(delim)
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return the variable names that match the given glob pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or exact variable name.
    var_list : list of str
        List of variable names to search.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-everything and exact-name lookup
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [var for var in var_list if fnmatchcase(var, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If True, single-quote the name before padding.

    Returns
    -------
    str
        Padded (and possibly quoted) string.
    """
    # Quote first, then pad.  str.ljust is a no-op when the (quoted) name is
    # already at least pad_num wide, matching the original behavior.  The
    # original also shadowed the function name with a local variable; the
    # rewrite avoids that.
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on the problem and capture its stdout.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to swallow any exception raised by run_model.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even when the exception propagates
        sys.stdout = saved_stdout
    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on the problem and capture its stdout.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False if successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # restore stdout even if run_driver raised
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
"""
Context manager for setting numpy print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `numpy.set_printoptions` for the full description of
available options. If any invalid options are specified, they will be ignored.
>>> with printoptions(precision=2):
... print(np.array([2.0])) / 3
[0.67]
The `as`-clause of the `with`-statement gives the current print options:
>>> with printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
Parameters
----------
*args : list
Variable-length argument list.
**kwds : dict
Arbitrary keyword arguments.
Yields
------
str or int
See Also
--------
set_printoptions, get_printoptions
"""
opts = np.get_printoptions()
# ignore any keyword args that are not valid in this version of numpy
# e.g. numpy <=1.13 does not have the 'floatmode' option
kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)
try:
np.set_printoptions(*args, **kw_opts)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
def _nothing():
    # Single-shot generator backing the no-op context manager.
    yield None


def do_nothing_context():
    """
    Return a context manager that does nothing.

    Useful when a block of code only sometimes requires a real context
    manager and you don't want to duplicate the managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither `right` nor `left` is specified (the default),
    all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
    if right:
        return re.sub(r"\s+$", "", s, flags=re.UNICODE)
    if left:
        return re.sub(r"^\s+", "", s, flags=re.UNICODE)
    # neither flag set: strip all whitespace, including interior
    return re.sub(r"\s+", "", s, flags=re.UNICODE)
# characters that are illegal in python identifiers, each mapped to '_'
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Replace illegal identifier characters in `s` with underscores.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The translated string.
    """
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        return {make_serializable_key(k): make_serializable(v) for k, v in o.items()}
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools are returned unchanged by
        # this branch.  (The original code had an `isinstance(o, bool)` test
        # after this one that could never fire -- dead code, removed here.)
        return o
    elif isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # object has no to_json(); fall back to its class name
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert an object into a type usable as a JSON dictionary key.

    No containers are produced, since keys must be scalar-like.  This
    function should NOT be passed into json.dump or json.dumps as the
    'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary object: use its class name as the key
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        return {make_serializable_key(k): default_noraise(v) for k, v in o.items()}
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through unchanged
        # here; the original's later `isinstance(o, bool)` test was
        # unreachable dead code and has been removed.
        return o
    elif isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.

    Raises
    ------
    TypeError
        If str_data is not a str, set, or list, or contains non-strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate element types before converting
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                raise TypeError(
                    f"Items in tags should be of type string, but type '{typ}' was found.")
        # a set is returned as-is (identity preserved); a list is converted
        if isinstance(str_data, set):
            return str_data
        return set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check whether a variable name passes the includes/excludes filter.

    Excludes take precedence over includes.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns to include in the filtering.  None (the default)
        includes everything.
    excludes : iter of str or None
        Glob patterns to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes the filtering of includes and excludes.
    """
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
# MASKED: match_prom_or_abs function (lines 853-890)
# values (case-insensitive) that count as "false" for env_truthy
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    An unset variable is treated the same as '0', i.e. falsey.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if not pathnames:
        return ''
    if len(pathnames) == 1:
        return pathnames[0]

    split_paths = [p.split('.') for p in pathnames]
    first = split_paths[0]
    # never compare beyond the shortest path
    ncommon = min(len(parts) for parts in split_paths)
    for loc in range(ncommon):
        part = first[loc]
        if any(parts[loc] != part for parts in split_paths[1:]):
            # first position where the paths disagree
            ncommon = loc
            break
    return '.'.join(first[:ncommon])
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        # normalize open-ended slice bounds to concrete values
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # general case (e.g. a tuple of slices or an ellipsis): materialize the
        # full index array, shape it, and apply the slicer to it
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary keyed on promoted input names to one keyed on ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over (name, meta) pairs is equivalent to the original comprehension
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  (Not referenced in this function body.)
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # trivial cases: with only one level of indices there is nothing to compose
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # NOTE(review): both index args appear to be Indexer-like objects (they
    # expose _flat_src, shaped_array(), flat() and are callable) -- confirm
    # against openmdao.utils.indexer.
    if my_src_inds._flat_src:
        # flat indices at this level index into the flattened parent indices
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat: reshape the parent's index array to this level's expected
        # source shape, then apply this level's (possibly nd) indices
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return the given shape as a tuple.

    Parameters
    ----------
    shape : int or tuple or None
        The given shape.  None is passed through unchanged.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit connection owner was found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: the original tested `tgt in abs2prom['input'][tgt]`, which
    # checks for tgt as a substring of its own promoted name (and raises
    # KeyError if tgt is missing).  The intended check is dict membership.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # connection is explicit; find the group where it was declared
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Import wingdbstub only when the WING_DBG environment variable is truthy.

    The import is attempted with the WINGHOME directory temporarily appended
    to sys.path, which is restored afterwards.
    """
    if not env_truthy('WING_DBG'):
        return
    import sys
    import os
    saved_path = sys.path
    sys.path = sys.path[:] + [os.environ['WINGHOME']]
    try:
        import wingdbstub  # noqa: F401
    finally:
        sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # look the variable up among outputs first, falling back to inputs
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # distributed variable: compute this rank's [start, end) window
            # within the full distributed size, plus (optionally) the offset
            # of the variable within the local vector
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # serial variable: precompute a plain index range
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Iterates over the FULL distributed size; indices not local to this
        rank yield None.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        for i in range(self._dist_size):
            if i >= start and i < end:
                # translate the global index to a local vector index
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Determine whether a variable passes the includes/excludes glob filters.

    Each pattern is tested against the absolute name, and also against the
    promoted name whenever the two differ.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name

    def _hits(pattern):
        return fnmatchcase(name, pattern) or (check_prom and fnmatchcase(prom_name, pattern))

    # exclusion wins over inclusion
    if excludes is not None and any(_hits(p) for p in excludes):
        return False

    # no includes given means everything not excluded passes
    return True if includes is None else any(_hits(p) for p in includes)
| 853
| 890
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.
    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.
    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    # always report the (possibly just-updated) module-level flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.
    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Warn (instead of raising) when the caller explicitly passed err=False, or
    # when err is None and errors are being globally ignored via ignore_errors().
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.
    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.
    Yields
    ------
    None
    """
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the previous setting even if the managed block raises
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.
    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in the terse formatter so the warning prints without the
    # source-line echo, then restore the original formatter no matter what.
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # FIX: use np.prod here -- the np.product alias is deprecated and was
            # removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))
    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.
    Adder and Scaler are used internally because the transformation is
    slightly more efficient.
    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.
    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.
    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.
    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        # (scaled = (physical + adder) * scaler, so ref0 maps to 0 and ref maps to 1)
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        # fall back to identity scaling for anything left unspecified
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0
    # normalize both to float or flat ndarray
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.
    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.
    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.
    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # environment override (see docstring): force a specific optimizer name
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer not instantiable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # NOTE(review): some pyoptsparse builds appear to stub missing
            # optimizers with Mock objects instead of raising, so a Mock here
            # means the requested optimizer is unavailable -- confirm.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not installed
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.
    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.
    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.
    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.
    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # Convert to ndarray/float as necessary. Array/iterable cases are handled
    # before the scalar comparisons below, so the == tests against +/-inf only
    # ever see scalar values.
    if isinstance(values, np.ndarray):
        if flatten:
            values = values.flatten()
    elif not isinstance(values, str) \
            and isinstance(values, Iterable):
        values = np.asarray(values, dtype=float)
        if flatten:
            values = values.flatten()
    elif values is None:
        values = val_if_none
    elif values == float('inf'):
        # replace +/-inf with the INF_BOUND sentinel (presumably a large finite
        # bound used internally -- confirm against openmdao.core.constants)
        values = INF_BOUND
    elif values == -float('inf'):
        values = -INF_BOUND
    elif isinstance(values, numbers.Number):
        values = float(values)
    else:
        raise TypeError('Expected values of {0} to be an Iterable of '
                        'numeric values, or a scalar numeric value. '
                        'Got {1} instead.'.format(name, values))
    return values
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """
    def __contains__(self, name):
        """
        Return if the named object is contained.
        Parameters
        ----------
        name : str
            Name of the object being looked up.
        Returns
        -------
        bool
            Always returns True.
        """
        # every membership test succeeds, regardless of the name given
        return True
def all_ancestors(pathname, delim='.'):
    """
    Yield the pathname of the starting object followed by each of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # emit the full path first, then peel off one trailing component at a time
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return the variable names that match the given glob pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # '*' trivially matches everything -- return the original list unchanged
    if pattern == '*':
        return var_list
    # an exact-name hit takes precedence over glob expansion
    if pattern in var_list:
        return [pattern]
    return [varname for varname in var_list if fnmatchcase(varname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that strings stacked vertically line up.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If True, wrap name in single quotes before padding.

    Returns
    -------
    str
        The (optionally quoted) string left-justified to pad_num characters.
        Strings already at least pad_num wide are returned without padding.
    """
    text = "'{}'".format(name) if quotes else name
    # ljust pads with spaces up to pad_num and leaves longer strings untouched,
    # which matches the original explicit-padding arithmetic exactly.
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    stdout = sys.stdout
    strout = StringIO()
    # redirect stdout so anything printed by run_model is captured
    sys.stdout = strout
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            # idiomatic bare raise re-raises the active exception in place of
            # the original 'raise err' form
            raise
    finally:
        # always restore the real stdout, even if run_model raised
        sys.stdout = stdout
    return strout.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    # capture everything the driver prints
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.
    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.
    >>> with printoptions(precision=2):
    ...     print(np.array([2.0]) / 3)
    [0.67]
    The `as`-clause of the `with`-statement gives the current print options:
    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())
    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.
    Yields
    ------
    str or int
    See Also
    --------
    set_printoptions, get_printoptions
    """
    opts = np.get_printoptions()
    # ignore any keyword args that are not valid in this version of numpy
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)
    try:
        np.set_printoptions(*args, **kw_opts)
        yield np.get_printoptions()
    finally:
        # always restore the caller's original print options
        np.set_printoptions(**opts)
def _nothing():
    # single-shot generator used to build the do-nothing context manager below
    yield None
def do_nothing_context():
    """
    Do nothing.
    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.
    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # select the regex for the requested trim mode, then apply it once
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # neither end specified: strip ALL whitespace, including interior
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# characters that are not legal in python identifiers
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
# translation table mapping each bad character to an underscore
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.
    Parameters
    ----------
    s : str
        The string to be translated.
    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        # all container types are converted to (recursively converted) lists
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools are returned unchanged here
        # (which is fine for JSON serialization).
        return o
    elif isinstance(o, complex):
        # FIX: the original also tested isinstance(o, bool) here, but that test
        # was unreachable because bools are caught by the int check above.
        # Removing it does not change behavior.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            # prefer an object's own JSON conversion if it defines one
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert a dictionary key to a native type suitable for JSON serialization.

    Unlike make_serializable, no containers are handled here since keys must
    be hashable scalars/strings.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # fall back to the type name for arbitrary objects
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools are returned unchanged here.
        return o
    elif isinstance(o, complex):
        # FIX: the original also tested isinstance(o, bool) here, but that test
        # was unreachable because bools are caught by the int check above.
        # Removing it does not change behavior.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # last resort: report the type rather than raising TypeError
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # falsy input (None, '', empty container) yields an empty set
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate element types before returning anything
        for entry in str_data:
            if not isinstance(entry, str):
                raise TypeError("Items in tags should be of type string, "
                                f"but type '{type(entry).__name__}' was found.")
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Determine whether a name passes the includes and excludes glob filters.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    # exclusion wins over inclusion
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # no includes given means everything not excluded passes
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Determine whether a variable passes the includes/excludes glob filters.

    Each pattern is tested against the absolute name, and also against the
    promoted name whenever the two differ.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name

    def _hits(pattern):
        return fnmatchcase(name, pattern) or (check_prom and fnmatchcase(prom_name, pattern))

    # exclusion wins over inclusion
    if excludes is not None and any(_hits(p) for p in excludes):
        return False

    # no includes given means everything not excluded passes
    return True if includes is None else any(_hits(p) for p in includes)
# environment-variable values (lowercased) that count as "false"
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    A missing variable is treated the same as '0', i.e. not truthy.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    common = []
    # zip truncates at the shortest path, so we only compare shared depths
    for level in zip(*(p.split('.') for p in pathnames)):
        first = level[0]
        if all(part == first for part in level[1:]):
            common.append(first)
        else:
            break
    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.
    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.
    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        # fill in defaults for any omitted slice fields
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # NOTE(review): negative start/stop/step values are not normalized in
        # this branch -- presumably callers only pass non-negative simple
        # slices; confirm.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # general case (e.g. a tuple of slices): apply the slicer to a full
        # index array of the target shape
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # the item iterator already performs the name conversion
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.
    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.
    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # if either level has no indices, the other level's indices apply directly
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # NOTE(review): both index objects appear to be Indexer instances (they
    # provide _flat_src, flat(), shaped_array() and __call__) -- confirm.
    if my_src_inds._flat_src:
        # index into the parent's flattened shaped array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # reshape the parent's array to this level's expected source shape,
        # then apply this level's (non-flat) indices
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple (None is passed through unchanged).
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # promote a scalar to a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit connection owner is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # FIX: the original tested 'tgt in abs2prom['input'][tgt]', which performed
    # a substring test against the promoted-name string (and raised KeyError
    # for unknown targets) instead of testing membership in the input mapping.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit; search
            # the tree for the group whose manual connections declare it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.
    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        save = sys.path
        # temporarily extend sys.path so wingdbstub can be found in WINGHOME
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            # restore the original path even if the import fails
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.
    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.
    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).
    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # determine whether vname is an output; if not found there, treat it as an input
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # 'sizes' is indexed as [rank, var_idx], so summing slices of it yields
            # per-rank offsets and the total distributed size of the variable.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                # indices of this variable within the local vector
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                # indices within the variable itself, i.e. range(var_size)
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.
        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.
        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate over the FULL distributed size; indices owned by other ranks yield
        # None so every rank performs the same number of iterations
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.
        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
common_subpath
|
Return the common dotted subpath found in all of the given dotted pathnames.
Parameters
----------
pathnames : iter of str
Dotted pathnames of systems.
Returns
-------
str
Common dotted subpath. Returns '' if no common subpath is found.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.
    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.
    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is not None:
        _ignore_errors = flag
    # always report the (possibly just-updated) module-level flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.
    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Warn (instead of raising) when the caller explicitly passed err=False, or
    # when err is None and errors are being globally ignored via ignore_errors().
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.
    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.
    Yields
    ------
    None
    """
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore the previous setting even if the managed block raises
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    .. deprecated::
        Use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in the terse formatter; must be restored even if warn() raises
    # (e.g. when warnings are configured as errors)
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product here; np.product was deprecated and
            # removed entirely in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined; if value is scalar broadcast it to an array of that shape,
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))
    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        # fill in defaults: scaled 1.0 maps to 1.0, scaled 0.0 maps to 0.0
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds:
        # scaled = (physical + adder) * scaler
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0

    # normalize to float or flat ndarray
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # environment override takes precedence over the requested optimizer
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT

        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # some pyoptsparse installs return a Mock for missing optimizers
            # rather than raising, so check for that as well
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if values is None:
        return val_if_none

    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # any non-string iterable becomes a float ndarray
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    # clamp infinities to the internal bound sentinel
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, _name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        _name : str
            Name of the object being looked up (ignored).

        Returns
        -------
        bool
            Always returns True.
        """
        # Membership always succeeds, regardless of the key.
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    # Repeatedly strip the last path component; stops when nothing remains.
    path = pathname
    while path:
        yield path
        path, _, _ = path.rpartition(delim)
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: wildcard matches everything; an exact name needs no globbing
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]

    matches = []
    for varname in var_list:
        if fnmatchcase(varname, pattern):
            matches.append(varname)
    return matches
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # optionally wrap in single quotes, then left-justify to the target width;
    # strings already at or beyond the width are returned unpadded
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        prob.run_model()
    except Exception:
        # re-raise unless the caller asked for best-effort execution
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even on error
        sys.stdout = saved_stdout
    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # restore stdout no matter what happened inside the driver
        sys.stdout = saved_stdout
    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop keyword args this numpy version doesn't understand
    # (e.g. numpy <= 1.13 has no 'floatmode' option)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Generator that yields exactly once, so it can back a no-op context manager.
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # wrap the single-yield generator so each call returns a fresh context manager
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # select the regex for the requested trim mode, then apply it once
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # default: strip all whitespace, including interior runs
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# characters that are invalid in python identifiers, each mapped to '_'
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Each punctuation character in the input is replaced with an underscore;
    all other characters pass through unchanged.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # single C-level pass over the string using the precomputed table
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # Bug fix: bool must be tested BEFORE int because bool is a subclass of int;
    # previously the bool branch was unreachable and bools fell through the
    # (str, float, int) check.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # object offers no custom serialization; fall back to its class name
            return o.__class__.__name__
    else:
        return o


def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, np.number):
        return o.item()
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    else:
        # anything else (ints, tuples, None, ...) becomes its string form
        return str(o)


def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # Bug fix: as in make_serializable, test bool before (str, float, int) so the
    # explicit bool->str branch is actually reachable.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # never raise; report the unconvertible type instead
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # empty / falsy input yields an empty set
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}

    if isinstance(str_data, (set, list)):
        # validate element types before returning
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                raise TypeError(
                    f"Items in tags should be of type string, but type '{typ}' was found.")
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # exclusion wins: any matching exclude pattern rejects the name outright
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # no include list means everything not excluded passes
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # only test the promoted name separately when it differs from the absolute name
    candidates = (name,) if name == prom_name else (name, prom_name)

    def _matches(pattern):
        return any(fnmatchcase(cand, pattern) for cand in candidates)

    # exclusion wins over inclusion
    if excludes is not None and any(_matches(pat) for pat in excludes):
        return False

    if includes is None:
        return True
    return any(_matches(pat) for pat in includes)
# lowercase environment values that count as "false"
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # an unset variable defaults to '0', i.e. falsey
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
# MASKED: common_subpath function (lines 913-947)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        # normalize open slice bounds to concrete values
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1

        # build only the selected indices, shaped like the output array
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # general case (tuple of slices / ellipsis): materialize the full flat index
        # array, reshape it, then apply the slicer to select the wanted entries
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the (name, meta) pairs does the same as a dict comprehension
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  (Not used in the current implementation.)
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either set of indices is missing, the other fully determines the result.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # NOTE(review): both index objects appear to be Indexers (they expose _flat_src,
    # shaped_array, flat, and are callable) -- confirm against the indexer class.
    if my_src_inds._flat_src:
        # flat case: select from the flattened parent index array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat case: reshape the parent's indices to this level's expected source
        # shape, then apply this level's (possibly multi-dimensional) index
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    # None passes through unchanged; a bare number becomes a 1-tuple
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit owning connection is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Bug fix: this previously tested "tgt in abs2prom['input'][tgt]", which performed a
    # substring check against the promoted-name string instead of a dict-key lookup.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit; find the group
            # whose manual connections declared it
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # temporarily extend sys.path with WINGHOME so wingdbstub can be found
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            # restore the original path even if the import fails
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # determine whether vname is an output or an input of the system
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable within the local vector on this rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            # [start, end) is this rank's slice of the full distributed variable
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                # indices relative to the containing vector
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                # indices relative to the variable itself
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # walk the full distributed size; only indices owned by this rank yield a value
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if not pathnames:
        return ''
    if len(pathnames) == 1:
        return pathnames[0]

    split_paths = [p.split('.') for p in pathnames]

    # count how many leading path components all names share; zip stops at the
    # shortest path automatically
    common = 0
    for parts in zip(*split_paths):
        if any(part != parts[0] for part in parts):
            break
        common += 1

    return '.'.join(split_paths[0][:common])
| 913
| 947
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
"""
Disable certain errors that will prevent setup from completing.
Parameters
----------
flag : bool or None
If not None, set the value of _ignore_errors to this value.
Returns
-------
bool
The current value of _ignore_errors.
"""
global _ignore_errors
if flag is not None:
_ignore_errors = flag
return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
"""
Raise an exception or issue a warning, depending on the value of _ignore_errors.
Parameters
----------
msg : str
The error/warning message.
exc : Exception class
This exception class is used to create the exception to be raised.
category : warning class
This category is the class of warning to be issued.
err : bool
If None, use ignore_errors(), otherwise use value of err to determine whether to
raise an exception (err=True) or issue a warning (err=False).
"""
if (err is None and ignore_errors()) or err is False:
issue_warning(msg, category=category)
else:
raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
"""
Set ignore_errors to the given flag in this context.
Parameters
----------
flag : bool
If not None, set ignore_errors to this value.
Yields
------
None
"""
save = ignore_errors()
ignore_errors(flag)
try:
yield
finally:
ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
"""
Display a simple warning message without the annoying extra line showing the warning call.
Parameters
----------
msg : str
The warning message.
category : class
The warning class.
stacklevel : int
Number of levels up the stack to identify as the warning location.
"""
warn_deprecation('simple_warning is deprecated. '
'Use openmdao.utils.om_warnings.issue_warning instead.')
old_format = warnings.formatwarning
warnings.formatwarning = _warn_simple_format
try:
warnings.warn(msg, category, stacklevel)
finally:
warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product here; np.product was deprecated and
            # removed entirely in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined; if value is scalar broadcast it to an array of that shape,
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))
    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
r"""
Determine proper values of adder and scaler based on user arguments.
Adder and Scaler are used internally because the transformation is
slightly more efficient.
Parameters
----------
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
Value to multiply the model value to get the scaled value. Scaler
is second in precedence.
Returns
-------
tuple
Adder and scaler, properly formatted and based on ref/ref0 if provided.
Raises
------
ValueError
If both ref/ref0 and adder/scaler were provided.
Notes
-----
The response can be scaled using ref and ref0.
The argument :code:`ref0` represents the physical value when the scaled value is 0.
The argument :code:`ref` represents the physical value when the scaled value is 1.
"""
# Affine scaling cannot be used with scalers/adders
if ref0 is not None or ref is not None:
if scaler is not None or adder is not None:
raise ValueError('Inputs ref/ref0 are mutually exclusive '
'with scaler/adder')
if ref is None:
ref = 1.0
if ref0 is None:
ref0 = 0.0
# Convert ref/ref0 to scaler/adder so we can scale the bounds
adder = -ref0
scaler = 1.0 / (ref + adder)
else:
if scaler is None:
scaler = 1.0
if adder is None:
adder = 0.0
adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # Environment variable overrides the requested optimizer (used to mimic CI machines).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT

        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer is unavailable; optionally fall back to SLSQP.
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some environments stub pyoptsparse pieces with Mock objects; treat a
            # mocked optimizer as unavailable and try SLSQP instead.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable; leave OPT/OPTIMIZER as None.
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    # Without fallback, failing to get exactly the requested optimizer skips the test.
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # Map unbounded scalar values to the finite sentinel used internally.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Check membership; always succeeds.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # The argument is deliberately ignored; membership is unconditional.
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # Yield the full path first, then progressively strip trailing components.
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # Fast paths: match-all wildcard, or an exact (non-glob) hit.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [varname for varname in var_list if fnmatchcase(varname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Build the (optionally quoted) text first, then right-pad to the field width.
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    if len(text) < pad_num:
        return text + ' ' * (pad_num - len(text))
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    capture = StringIO()
    sys.stdout = capture
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            # Re-raise the active exception with its original traceback.
            raise
    finally:
        # Always restore stdout, even when the model raises.
        sys.stdout = saved_stdout

    return capture.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    capture = StringIO()
    sys.stdout = capture
    try:
        failed = prob.run_driver()
    finally:
        # Always restore stdout, even when the driver raises.
        sys.stdout = saved_stdout

    return failed, capture.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    opts = np.get_printoptions()

    # ignore any keyword args that are not valid in this version of numpy
    # e.g. numpy <=1.13 does not have the 'floatmode' option
    kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)

    try:
        np.set_printoptions(*args, **kw_opts)
        yield np.get_printoptions()
    finally:
        # Restore the caller's print options even if the body raised.
        np.set_printoptions(**opts)
def _nothing():
    # Single-shot no-op generator; backs do_nothing_context() below.
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # Wrap the no-op generator in a context manager and instantiate it.
    noop_cm = contextmanager(_nothing)
    return noop_cm()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Select the pattern once, then do a single substitution pass.
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # Neither end specified: strip all white-space, including interior.
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are invalid in Python identifiers; each maps to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # Single C-level pass over the string using the precomputed table.
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # bool must be tested before int: bool is a subclass of int, so when this
    # branch followed the (str, float, int) test it was unreachable for bools.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        try:
            # Prefer the object's own JSON representation when it provides one.
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # General objects are keyed by their class name.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    # Containers are spelled inline so this function is self-contained.
    if isinstance(o, (list, tuple, set)):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # bool must be tested before int: bool is a subclass of int, so when this
    # branch followed the (str, float, int) test it was unreachable for bools.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # Falsy input (None, '', empty container) yields an empty set.
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate element types before converting.
        for entry in str_data:
            if not isinstance(entry, str):
                typ = type(entry).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Excludes take precedence: any match rejects the name outright.
    if excludes is not None:
        for pattern in excludes:
            if fnmatchcase(name, pattern):
                return False

    # With no includes specified, anything that survived the excludes passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, pattern) for pattern in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    diff = name != prom_name

    def _hits(pattern):
        # Match the absolute name, and the promoted name only when it differs.
        return fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))

    # Excludes take precedence over includes.
    if excludes is not None and any(_hits(pattern) for pattern in excludes):
        return False

    if includes is None:
        return True
    return any(_hits(pattern) for pattern in includes)
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable defaults to '0', i.e. false.
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if pathnames:
        # Compare component-by-component; zip stops at the shortest path, which
        # replaces the original np.min-over-lengths bookkeeping (no numpy needed
        # for a scalar min, and the for/else/continue/break dance is gone).
        splits = [p.split('.') for p in pathnames]
        common = []
        for components in zip(*splits):
            first = components[0]
            if all(c == first for c in components[1:]):
                common.append(first)
            else:
                break
        return '.'.join(common)

    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if not isinstance(slicer, slice):
        # General case (tuples, ellipsis, ...): index into the full index array.
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]

    # Plain slice: build only the requested range, which uses less memory.
    # Slice fields are substituted directly (no negative-index normalization).
    start = 0 if slicer.start is None else slicer.start
    stop = arr_size if slicer.stop is None else slicer.stop
    step = 1 if slicer.step is None else slicer.step
    return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the (name, meta) pairs replaces the explicit comprehension.
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # When either set of indices is absent, the other passes through unchanged.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # NOTE(review): my_src_inds appears to be an OpenMDAO Indexer-like object here
    # (it provides _flat_src, flat() and __call__) -- confirm against callers.
    if my_src_inds._flat_src:
        # Flat local indices select from the flattened parent index array.
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # Non-flat indices select from the parent array reshaped to this level's shape.
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # A bare scalar becomes a 1-tuple.
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Fix: original tested `tgt in abs2prom['input'][tgt]`, which looks up the
    # promoted name (KeyError for unknown targets) and then does a substring
    # test against it. The intended check is membership in the input map.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # connection is explicit; find the group that declared it.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # Temporarily extend sys.path with WINGHOME so wingdbstub can be found there.
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            # Restore the original sys.path whether or not the import succeeded.
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Determine whether vname is an output or an input of the system and pick
        # the matching sizes/slices bookkeeping.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # Distributed variable: compute this rank's portion of the full variable.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # Serial variable: indices are a simple contiguous range.
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # Iterate over the FULL distributed size; indices outside this rank's
        # [start, end) window yield None.
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
_is_slicer_op
|
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    # Combined getter/setter for the module-level flag; passing None only reads.
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Warn instead of raising when err is explicitly False, or when err is unset
    # and errors are being globally ignored.
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    save = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Restore the prior setting even if the body raised.
        ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    .. deprecated::
        Use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in the short formatter, restoring the original afterwards
    # so other warnings are unaffected.
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.product was deprecated in NumPy 1.25 and removed in 2.0;
            # np.prod is the supported spelling.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # The affine (ref/ref0) form and the adder/scaler form are mutually exclusive.
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Convert ref/ref0 to the internal adder/scaler form so bounds can be scaled.
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # Environment variable overrides the requested optimizer (used to mimic CI machines).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT

        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer is unavailable; optionally fall back to SLSQP.
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some environments stub pyoptsparse pieces with Mock objects; treat a
            # mocked optimizer as unavailable and try SLSQP instead.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable; leave OPT/OPTIMIZER as None.
        pass

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    # Without fallback, failing to get exactly the requested optimizer skips the test.
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # Map unbounded scalar values to the finite sentinel used internally.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Check membership; always succeeds.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # The argument is deliberately ignored; membership is unconditional.
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # Yield the full path first, then progressively strip trailing components.
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # Fast paths: match-all wildcard, or an exact (non-glob) hit.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [varname for varname in var_list if fnmatchcase(varname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Left-justify a (possibly quoted) name so stacked rows line up.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    if len(text) < pad_num:
        # fill with trailing spaces up to the requested total width
        return text.ljust(pad_num)
    # already at or past the requested width: return unpadded
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        # swallow the error only when the caller asked us to
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even on error
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # always restore stdout, even on error
        sys.stdout = saved_stdout

    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager that temporarily overrides numpy print options.

    Options are applied for the scope of the `with` block and the previous
    settings restored on exit.  See `numpy.set_printoptions` for the available
    options; keyword options not supported by this numpy version are silently
    dropped (e.g. 'floatmode' on numpy <= 1.13).

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # keep only keyword options that this numpy version understands
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # One-shot generator backing do_nothing_context(); yields a single None so
    # the wrapping @contextmanager produces a no-op context manager.
    yield None
def do_nothing_context():
    """
    Return a context manager that does nothing.

    Useful when a block of code only sometimes needs a real context manager
    and you don't want to duplicate the managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # wrap the no-op generator on each call so every caller gets a fresh
    # single-use context manager
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Strip white-space characters from the given string.

    With neither flag set (the default), ALL white-space is removed,
    including interior white-space; otherwise only the selected end(s)
    are stripped.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if left and right:
        return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
    if right:
        return re.sub(r"\s+$", "", s, flags=re.UNICODE)
    if left:
        return re.sub(r"^\s+", "", s, flags=re.UNICODE)
    # neither end selected: remove every white-space character
    return re.sub(r"\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a string into a valid python variable name.

    Each character found in `_badtab` is replaced with an underscore.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through here
        # unchanged (they are natively JSON serializable anyway).
        return o
    elif isinstance(o, complex):
        # FIX: the former `isinstance(o, bool) or isinstance(o, complex)`
        # guard was partially dead code -- a bool could never reach it
        # because of the int check above.  Only complex needs stringifying.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert an object into a native type usable as a JSON dictionary key.

    Unlike make_serializable, containers are not handled here.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary object: fall back to its class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through here
        # unchanged (they are natively JSON serializable anyway).
        return o
    elif isinstance(o, complex):
        # FIX: the former `isinstance(o, bool) or isinstance(o, complex)`
        # guard was partially dead code -- a bool could never reach it
        # because of the int check above.  Only complex needs stringifying.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()

    if isinstance(str_data, str):
        return {str_data}

    if isinstance(str_data, (set, list)):
        # every member must itself be a string
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))

    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check whether a variable name passes the includes/excludes filters.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # an exclude match always wins
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # no includes given means include everything
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check whether either form of a variable name passes the includes/excludes filters.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    def _hit(pattern):
        # test the absolute name, and the promoted name only when it differs
        return fnmatchcase(name, pattern) or \
            (name != prom_name and fnmatchcase(prom_name, pattern))

    # an exclude match always wins
    if excludes is not None and any(_hit(pat) for pat in excludes):
        return False

    if includes is None:
        return True

    return any(_hit(pat) for pat in includes)
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    An unset variable defaults to '0', which is falsey; the comparison is
    case-insensitive.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    split_paths = [p.split('.') for p in pathnames]
    shortest = min(len(parts) for parts in split_paths)
    first = split_paths[0]

    # count how many leading components every path shares with the first one
    ncommon = 0
    for depth in range(shortest):
        if any(parts[depth] != first[depth] for parts in split_paths[1:]):
            break
        ncommon += 1

    return '.'.join(first[:ncommon])
# MASKED: _is_slicer_op function (lines 950-967)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # NOTE(review): these defaults assume a non-negative step; a slice
        # like [::-1] would need different start/stop defaults -- confirm
        # callers never pass negative-step simple slices.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # general case (tuple of slices, ellipsis, ...): materialize the full
        # index array and apply the slicer to it
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() over the item iterator is equivalent to the old dict comprehension
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
        NOTE(review): currently unused in the body below -- confirm intent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # if either level has no indices, the other level's indices apply unchanged
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # my_src_inds is presumably an Indexer (has _flat_src, flat(), __call__)
    # -- confirm against openmdao.utils.indexer
    if my_src_inds._flat_src:
        # flat indexing: select from the flattened parent index array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat: reshape the parent indices to this level's expected
        # source shape, then apply the indexer
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return the given shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # scalar: wrap in a 1-tuple
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicitly connecting group was found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # FIX: this previously tested `tgt in abs2prom['input'][tgt]`, which did a
    # substring check against the promoted-name string instead of a dict-key
    # lookup, so the explicit-connection search was almost never reached.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit
            # (made via a manual connect() call on some group)
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        save = sys.path
        # temporarily extend sys.path so wingdbstub can be found in WINGHOME
        # (raises KeyError if WINGHOME is not set while WING_DBG is truthy)
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            # restore the original path even if the import fails
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # look the variable up as an output first, then fall back to inputs
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # offset of this variable within the local vector (0 if unwanted)
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            # this rank owns the index range [_start, _end) of the full
            # distributed variable
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                # indices of this variable within its containing vector
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                # indices within the variable itself, i.e. range(var_size)
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        for i in range(self._dist_size):
            if i >= start and i < end:
                # local index: translate into this rank's (possibly
                # vector-offset) index space
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
| 950
| 967
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Get (and optionally set) the flag that disables certain setup-aborting errors.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is not None:
        # update the module-level setting before reporting it
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # an explicit `err` overrides the module-level ignore_errors() setting
    warn_only = (err is False) or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Temporarily set the ignore_errors flag for the duration of a `with` block.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the prior setting, even if the body raised
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; issues a deprecation notice before delegating to warnings.warn.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    saved_format = warnings.formatwarning
    # temporarily swap in the minimal formatter so the call line isn't shown
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # FIX: use np.prod -- np.product is a deprecated alias that was
            # removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # affine (ref/ref0) scaling cannot be combined with explicit scaler/adder
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # the environment variable overrides whatever the test requested
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # some pyoptsparse installs hand back a Mock for missing
            # optimizers instead of raising, so check for that here as well
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        # caller insisted on a specific optimizer that isn't available
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # clip infinities to the finite bound used internally
    if values == float('inf'):
        return INF_BOUND

    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary whose membership test succeeds for every key.
    """

    def __contains__(self, name):
        """
        Report that `name` is present.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always True, regardless of `name`.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Yield the given pathname followed by each of its ancestor pathnames.

    Results are ordered from the full pathname down to the top-level name.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    pieces = pathname.split(delim)
    while pieces:
        yield delim.join(pieces)
        pieces.pop()
def find_matches(pattern, var_list):
    """
    Return the variable names from var_list that match the given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # '*' matches everything; an exact hit short-circuits the glob scan
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [vname for vname in var_list if fnmatchcase(vname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Left-justify a (possibly quoted) name so stacked rows line up.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    if len(text) < pad_num:
        # fill with trailing spaces up to the requested total width
        return text.ljust(pad_num)
    # already at or past the requested width: return unpadded
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        # swallow the error only when the caller asked us to
        if not ignore_exception:
            raise
    finally:
        # always restore stdout, even on error
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    saved_stdout = sys.stdout
    captured = StringIO()
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # always restore stdout, even on error
        sys.stdout = saved_stdout

    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager that temporarily overrides numpy print options.

    Options are applied for the scope of the `with` block and the previous
    settings restored on exit.  See `numpy.set_printoptions` for the available
    options; keyword options not supported by this numpy version are silently
    dropped (e.g. 'floatmode' on numpy <= 1.13).

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # keep only keyword options that this numpy version understands
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # One-shot generator backing do_nothing_context(); yields a single None so
    # the wrapping @contextmanager produces a no-op context manager.
    yield None
def do_nothing_context():
    """
    Return a context manager that does nothing.

    Useful when a block of code only sometimes needs a real context manager
    and you don't want to duplicate the managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # wrap the no-op generator on each call so every caller gets a fresh
    # single-use context manager
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Strip white-space characters from the given string.

    With neither flag set (the default), ALL white-space is removed,
    including interior white-space; otherwise only the selected end(s)
    are stripped.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if left and right:
        return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
    if right:
        return re.sub(r"\s+$", "", s, flags=re.UNICODE)
    if left:
        return re.sub(r"^\s+", "", s, flags=re.UNICODE)
    # neither end selected: remove every white-space character
    return re.sub(r"\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.
    Parameters
    ----------
    s : str
        The string to be translated.
    Returns
    -------
    str
        The valid python name string.
    """
    # Maps each punctuation character listed in _badtab to '_'.
    # NOTE(review): whitespace and a leading digit are not translated, so e.g.
    # 'a b' or '1x' still yield invalid identifiers — confirm callers pre-sanitize.
    return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    # (list, tuple, set) matches the module-level _container_classes tuple.
    if isinstance(o, (list, tuple, set)):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # BUG FIX: bool must be tested before (str, float, int) because bool is a
    # subclass of int; the previous ordering made this branch unreachable for
    # bools, so True/False leaked through unconverted instead of becoming str.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        try:
            # Prefer an object's own JSON representation when it provides one.
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert a dictionary key to a type usable for JSON serialization.
    Only scalar-like keys are handled here — no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # Fall back to the type name for arbitrary objects.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.
    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    # (list, tuple, set) matches the module-level _container_classes tuple.
    if isinstance(o, (list, tuple, set)):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # BUG FIX: bool must be tested before (str, float, int) because bool is a
    # subclass of int; the previous ordering made this branch unreachable for
    # bools, so True/False leaked through unconverted instead of becoming str.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.
    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.
    Returns
    -------
    set
        A set of character strings.
    """
    # None / empty string / empty container all yield the empty set.
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate every element before returning anything.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.
    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.
    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins.
    if excludes is not None and any(fnmatchcase(name, p) for p in excludes):
        return False
    # No include list means everything (not excluded) passes.
    if includes is None:
        return True
    return any(fnmatchcase(name, p) for p in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.
    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.
    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name separately when it differs from the absolute name.
    diff = name != prom_name

    def _hits(pattern):
        return fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))

    # An exclude match always wins.
    if excludes is not None and any(_hits(p) for p in excludes):
        return False
    # No include list means everything (not excluded) passes.
    if includes is None:
        return True
    return bool(any(_hits(p) for p in includes))
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.
    Parameters
    ----------
    env_var : str
        The name of the environment variable.
    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable defaults to '0', i.e. falsey.
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.
    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.
    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if not pathnames:
        return ''
    splits = [p.split('.') for p in pathnames]
    # zip stops at the shortest path, so count leading components shared by all.
    ncommon = 0
    for components in zip(*splits):
        if any(comp != components[0] for comp in components[1:]):
            break
        ncommon += 1
    return '.'.join(splits[0][:ncommon])
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.
    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.
    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # Simple slice: build only the requested arange span (less memory).
        begin = 0 if slicer.start is None else slicer.start
        end = arr_size if slicer.stop is None else slicer.stop
        stride = 1 if slicer.step is None else slicer.step
        return np.arange(begin, end, stride, dtype=INT_DTYPE).reshape(arr_shape)
    # Tuple of slices (or other fancy index): index into the full arange.
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.
    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.
    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # The item iterator already yields (converted_name, meta) pairs.
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.
    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.
    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # NOTE(review): parent_src_shape is currently unused in this function —
    # confirm whether it is kept only for interface symmetry.
    # If either level has no indices, the other level's indices apply directly.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # my_src_inds appears to be an Indexer (provides _flat_src and flat());
    # parent_src_inds provides shaped_array() — TODO confirm against Indexer API.
    if my_src_inds._flat_src:
        # Flat source: index the flattened parent index array directly.
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # Non-flat: reshape the parent's array to this level's expected shape first.
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.
    Parameters
    ----------
    shape : int or tuple
        The given shape.
    Returns
    -------
    tuple
        The shape as a tuple.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        # A bare scalar becomes a 1-tuple.
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.
    Note : this is not speedy. It's intended for use only in error messages.
    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.
    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group
    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom
    # BUG FIX: membership must be tested against the abs2prom['input'] dict,
    # not against the promoted-name *string* abs2prom['input'][tgt] (which did
    # an accidental substring check); the dict is indexed with tgt just below.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # Promoted names differ, so the connection was declared explicitly;
            # find the group whose manual connections mention this target.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom
    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.
    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import os
        import sys
        orig_path = sys.path
        # Temporarily extend sys.path with WINGHOME so wingdbstub is importable.
        sys.path = list(sys.path) + [os.environ['WINGHOME']]
        try:
            import wingdbstub
        finally:
            sys.path = orig_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.
    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.
    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).
    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # Determine whether vname is an output or an input and grab the
        # matching metadata, per-rank sizes, and vector slice dict.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # Distributed variable: iterate over its full distributed size and
            # yield None for entries owned by other ranks (see _dist_iter).
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            # [_start, _end) is this rank's owned span within the full variable.
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                # Indices within the containing vector.
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                # Indices within the variable itself.
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.
        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.
        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        for i in range(self._dist_size):
            if i >= start and i < end:
                # Shift the global index into this rank's local (optionally
                # vector-offset) frame.
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.
        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
convert_src_inds
|
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
# Toggled/queried via the ignore_errors() and ignore_errors_context() helpers below.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.
    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.
    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    # None means "just query the current value".
    if flag is None:
        return _ignore_errors
    _ignore_errors = flag
    return flag
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.
    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # err=False forces a warning; err=None defers to the global ignore_errors flag.
    warn_instead = err is False or (err is None and ignore_errors())
    if warn_instead:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.
    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.
    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Always restore the pre-context value, even on error.
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.
    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in the minimal formatter, restoring it afterwards.
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.
    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.
    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.
    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)
    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape
    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, an alias that was removed in NumPy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape
    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))
    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    """
    Determine proper values of adder and scaler based on user arguments.
    Adder and Scaler are used internally because the transformation is
    slightly more efficient.
    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.
    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.
    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.
    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is None and ref is None:
        # No affine (ref/ref0) scaling given; fill in neutral defaults.
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0
    else:
        # Affine scaling cannot be used with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.
    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.
    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.
    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # The env var overrides the requested optimizer (e.g. force SNOPT machines to SLSQP).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer unavailable; optionally fall back to SLSQP.
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some environments stub pyoptsparse with Mock objects; treat a Mock
            # result as "not really available" and fall back if allowed.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse is not importable at all.
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        # Caller insisted on a specific optimizer that we couldn't provide.
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.
    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.
    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.
    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.
    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr
    if values is None:
        return val_if_none
    # Map infinities onto the internal bound sentinel.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A dict-like stand-in whose membership test succeeds for every name.
    """
    def __contains__(self, name):
        """
        Report that the named object is contained.
        Parameters
        ----------
        name : str
            Name of the object being looked up.
        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.
    Pathnames are ordered from longest to shortest.
    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.
    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # Yield the full path first, then drop one trailing component per step.
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.
    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.
    Returns
    -------
    list
        Variable names that match pattern.
    """
    # Fast paths: match-all pattern, or an exact (non-glob) hit.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [vname for vname in var_list if fnmatchcase(vname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.
    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.
    Returns
    -------
    str
        Padded string.
    """
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    if len(text) < pad_num:
        # Right-pad with spaces up to pad_num columns.
        return text.ljust(pad_num)
    # Already at least pad_num wide; return unpadded.
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.
    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.
    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # Always restore stdout, even if the model raised.
        sys.stdout = saved_stdout
    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.
    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # Always restore stdout, even if the driver raised.
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.
    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.
    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]
    The `as`-clause of the `with`-statement gives the current print options:
    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())
    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.
    Yields
    ------
    str or int
    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # Silently drop any keyword args unknown to this numpy version
    # (e.g. numpy <=1.13 has no 'floatmode' option).
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    """Yield None once; the generator behind do_nothing_context()."""
    yield None
def do_nothing_context():
    """
    Do nothing.
    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.
    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # Wrap the trivial `_nothing` generator so it behaves as a no-op context manager.
    make_ctx = contextmanager(_nothing)
    return make_ctx()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.
    If neither right nor left is specified (the default),
    then all white-space is removed.
    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.
    Returns
    -------
    str
        The string with white-space removed.
    """
    if not (left or right):
        # Strip all interior and exterior whitespace.
        return re.sub(r"\s+", "", s, flags=re.UNICODE)
    # Build an anchored pattern for just the requested side(s).
    anchors = []
    if left:
        anchors.append(r"^\s+")
    if right:
        anchors.append(r"\s+$")
    return re.sub("|".join(anchors), "", s, flags=re.UNICODE)
# Characters that may not appear in a Python identifier, and a translation
# table mapping each of them to an underscore (used by str2valid_python_name).
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.
    Parameters
    ----------
    s : str
        The string to be translated.
    Returns
    -------
    str
        The valid python name string.
    """
    # Maps each punctuation character listed in _badtab to '_'.
    # NOTE(review): whitespace and a leading digit are not translated, so e.g.
    # 'a b' or '1x' still yield invalid identifiers — confirm callers pre-sanitize.
    return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    # (list, tuple, set) matches the module-level _container_classes tuple.
    if isinstance(o, (list, tuple, set)):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # BUG FIX: bool must be tested before (str, float, int) because bool is a
    # subclass of int; the previous ordering made this branch unreachable for
    # bools, so True/False leaked through unconverted instead of becoming str.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        try:
            # Prefer an object's own JSON representation when it provides one.
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert a dictionary key to a type usable for JSON serialization.
    Only scalar-like keys are handled here — no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # Fall back to the type name for arbitrary objects.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.
    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.
    Parameters
    ----------
    o : object
        The object to be converted.
    Returns
    -------
    object
        The converted object.
    """
    # (list, tuple, set) matches the module-level _container_classes tuple.
    if isinstance(o, (list, tuple, set)):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    # BUG FIX: bool must be tested before (str, float, int) because bool is a
    # subclass of int; the previous ordering made this branch unreachable for
    # bools, so True/False leaked through unconverted instead of becoming str.
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif isinstance(o, (str, float, int)):
        return o
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.
    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.
    Returns
    -------
    set
        A set of character strings.
    """
    # None / empty string / empty container all yield the empty set.
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate every element before returning anything.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if a variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins over any include.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # With no include patterns, everything not excluded passes.
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check whether the absolute or promoted name passes the includes/excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name separately when it differs from the absolute name.
    names = (name, prom_name) if name != prom_name else (name,)

    def _hits(pattern):
        return any(fnmatchcase(n, pattern) for n in names)

    # Excludes take precedence over includes.
    if excludes is not None and any(_hits(pat) for pat in excludes):
        return False

    if includes is None:
        return True

    return any(_hits(pat) for pat in includes)
# Lowercased environment-variable values that env_truthy() treats as false.
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable defaults to '0', i.e. not truthy.
    value = os.environ.get(env_var, '0').lower()
    return value not in ('0', 'false', 'no', '')
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    split_paths = [p.split('.') for p in pathnames]
    common = []
    # zip stops at the shortest path, so we never index past any path's end.
    for parts in zip(*split_paths):
        first = parts[0]
        if all(part == first for part in parts[1:]):
            common.append(first)
        else:
            break

    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        # NOTE(review): None bounds are normalized below, but negative start/stop
        # values are NOT handled in this branch -- assumes non-negative slice
        # bounds. TODO confirm callers never pass negative slice values here.
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # The sliced range is reshaped to arr_shape, so the slice is expected to
        # select exactly prod(arr_shape) entries.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # General case (slice tuples, ellipsis): build the full index array,
        # reshape it, then apply the slicer to it.
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # Inline the name conversion: keep the key unless metadata names an ivc source.
    return {name if meta['ivc_source'] is None else meta['ivc_source']: meta
            for name, meta in prom_dict.items()}
# MASKED: convert_src_inds function (lines 1063-1091)
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape is None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no explicit connection owner was found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: this previously read `tgt in abs2prom['input'][tgt]`, which indexed
    # the dict (raising KeyError for unknown targets) and then did a substring
    # test on the promoted name.  Membership in the dict itself was intended.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit (via connect())
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        # Local imports so nothing is paid unless Wing debugging is requested.
        import sys
        import os
        # Temporarily extend sys.path with WINGHOME so wingdbstub can be found.
        # NOTE(review): raises KeyError if WING_DBG is set but WINGHOME is not.
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]
        sys.path = new
        try:
            import wingdbstub
        finally:
            # Always restore the original sys.path, even if the import fails.
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Determine whether vname is an output or an input of the system and grab
        # the matching size table and vector slices.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # Distributed variable: compute this rank's slice of the full
            # distributed size from the per-rank size table.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # Serial variable: iterate over a simple index range.
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # Iterate over the FULL distributed size; indices owned by other ranks
        # yield None so all ranks stay in lockstep.
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.  (Currently unused in this function.)
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either level has no indices, the other level's indices apply unchanged.
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds

    # NOTE(review): my_src_inds appears to be an indexer object (has _flat_src,
    # flat(), and is callable) rather than a plain ndarray -- confirm against the
    # indexer class used by callers.
    if my_src_inds._flat_src:
        # Flat indexing: index directly into the flattened parent index array.
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # Non-flat: reshape the parent's index array to this level's expected
        # source shape before applying the indexer.
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
| 1,063
| 1,091
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors

    if flag is None:
        # pure query -- leave the module-level flag unchanged
        return _ignore_errors

    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # err=None defers to the global ignore_errors() flag; err=False forces a warning.
    warn_only = ignore_errors() if err is None else err is False

    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # restore whatever the flag was before entering the context
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated; use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Temporarily swap in the terse formatter so the warning prints without the
    # extra source-line context, then restore the global formatter afterwards.
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # normalize the given shape to a tuple, or infer it from a non-scalar value
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated and then removed
            # in NumPy 2.0; the semantics are identical.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # ref/ref0 and scaler/adder are mutually exclusive ways to specify scaling.
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Convert ref/ref0 to the equivalent scaler/adder pair so bounds can be scaled.
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # The env var overrides the requested optimizer (e.g. to force SLSQP on CI).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer not available; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some pyoptsparse installs return Mock placeholders for missing optimizers.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not installed
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none

    # Scalars: map infinities onto the internal INF_BOUND sentinel.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND

    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Report that any named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
        Successively shorter ancestor pathnames.
    """
    parts = pathname.split(delim)
    # Always yields at least once ('' splits into ['']), matching the original.
    while parts:
        yield delim.join(parts)
        parts.pop()
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # Fast paths: wildcard-everything and exact-name lookup.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [var for var in var_list if fnmatchcase(var, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    # ljust pads shorter strings to pad_num columns and leaves longer (or equal)
    # strings untouched, matching the original explicit-branch behavior.
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception as err:
        # Swallow the exception only when explicitly requested.
        if not ignore_exception:
            raise err
    finally:
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # Always restore stdout, even if run_driver raises.
        sys.stdout = saved_stdout

    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    The `as`-clause of the `with`-statement gives the current print options.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    dict
        The active numpy print options.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved_opts = np.get_printoptions()

    # Drop any keyword args this numpy version doesn't recognize
    # (e.g. numpy <= 1.13 has no 'floatmode' option).
    valid_kwds = {key: val for key, val in kwds.items() if key in saved_opts}

    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved_opts)
def _nothing():
    # Generator yielding a single None; wrapped by do_nothing_context().
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # Inline the no-op generator rather than delegating to a module-level helper.
    def _noop():
        yield None

    return contextmanager(_noop)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Choose the pattern based on which side(s) were requested.
    if not (right or left):
        pattern = r"\s+"
    elif right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    else:  # left only
        pattern = r"^\s+"

    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are invalid in python names; each is mapped to '_' by the
# translation table used in str2valid_python_name().
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Characters listed in the module-level ``_badtab`` (punctuation/operators) are
    replaced with underscores via the ``_transtab`` translation table.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
# Container types whose elements are converted one-by-one during serialization.
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through unchanged here.
        return o
    elif isinstance(o, complex):
        # The former 'isinstance(o, bool) or isinstance(o, complex)' test could never
        # see a bool (caught by the int check above), so only complex is tested.
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            # object has no to_json(); fall back to its class name
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Convert an object to a native type suitable for use as a JSON dictionary key.

    This function is for making serializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE: bool is a subclass of int, so bools pass through unchanged here.
        return o
    elif isinstance(o, complex):
        # The former 'isinstance(o, bool) or isinstance(o, complex)' test could never
        # see a bool (caught by the int check above), so only complex is tested.
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()

    if isinstance(str_data, str):
        return {str_data}

    if isinstance(str_data, (set, list)):
        # Validate every entry before deciding how to build the result.
        for entry in str_data:
            if not isinstance(entry, str):
                raise TypeError("Items in tags should be of type string, "
                                f"but type '{type(entry).__name__}' was found.")
        return str_data if isinstance(str_data, set) else set(str_data)

    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if a variable name passes through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins over any include.
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False

    # With no include patterns, everything not excluded passes.
    if includes is None:
        return True

    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check whether the absolute or promoted name passes the includes/excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name separately when it differs from the absolute name.
    names = (name, prom_name) if name != prom_name else (name,)

    def _hits(pattern):
        return any(fnmatchcase(n, pattern) for n in names)

    # Excludes take precedence over includes.
    if excludes is not None and any(_hits(pat) for pat in excludes):
        return False

    if includes is None:
        return True

    return any(_hits(pat) for pat in includes)
# Lowercased environment-variable values that env_truthy() treats as false.
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable defaults to '0', i.e. not truthy.
    value = os.environ.get(env_var, '0').lower()
    return value not in ('0', 'false', 'no', '')
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    split_paths = [p.split('.') for p in pathnames]
    common = []
    # zip stops at the shortest path, so we never index past any path's end.
    for parts in zip(*split_paths):
        first = parts[0]
        if all(part == first for part in parts[1:]):
            common.append(first)
        else:
            break

    return '.'.join(common)
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # for a simple slice we can use less memory
        # NOTE(review): None bounds are normalized below, but negative start/stop
        # values are NOT handled in this branch -- assumes non-negative slice
        # bounds. TODO confirm callers never pass negative slice values here.
        start, stop, step = slicer.start, slicer.stop, slicer.step
        if start is None:
            start = 0
        if stop is None:
            stop = arr_size
        if step is None:
            step = 1
        # The sliced range is reshaped to arr_shape, so the slice is expected to
        # select exactly prod(arr_shape) entries.
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    else:
        # General case (slice tuples, ellipsis): build the full index array,
        # reshape it, then apply the slicer to it.
        return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
"""
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
"""
if parent_src_inds is None:
return my_src_inds
elif my_src_inds is None:
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
"""
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
"""
if isinstance(shape, Number):
return (shape,)
elif shape is None:
return shape
return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: was `tgt in abs2prom['input'][tgt]`, which performed a substring test on
    # the promoted-name string (and could KeyError if tgt was absent) instead of a
    # membership test on the 'input' dict itself.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # where the manual connection was declared
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
"""
Make import of wingdbstub contingent on value of WING_DBG environment variable.
Also will import wingdbstub from the WINGHOME directory.
"""
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = sys.path[:] + [os.environ['WINGHOME']]
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
class LocalRangeIterable(object):
"""
Iterable object yielding local indices while iterating over local or distributed vars.
The number of iterations for a distributed variable will be the full distributed size of the
variable but None will be returned for any indices that are not local to the given rank.
Parameters
----------
system : System
Containing System.
vname : str
Name of the variable.
use_vec_offset : bool
If True, return indices for the given variable within its vector, else just return
indices within the variable itself, i.e. range(var_size).
Attributes
----------
_inds : ndarray
Variable indices (unused for distributed variables).
_dist_size : int
Full size of distributed variable.
_start : int
Starting index of distributed variable on this rank.
_end : int
Last index + 1 of distributed variable on this rank.
_offset : int
Offset of this variable into the local vector,.
_iter : method
The iteration method used.
"""
def __init__(self, system, vname, use_vec_offset=True):
"""
Initialize the iterator.
"""
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if vname in abs2meta:
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = self._start + sizes[rank, var_idx]
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range(slices[vname].stop - slices[vname].start)
def _serial_iter(self):
"""
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
"""
yield from self._inds
def _dist_iter(self):
"""
Iterate over a distributed variable.
Yields
------
int or None
Variable index or None if index is not local to this rank.
"""
start = self._start
end = self._end
for i in range(self._dist_size):
if i >= start and i < end:
yield i - start + self._offset
else:
yield None
def __iter__(self):
"""
Return an iterator.
Returns
-------
iterator
An iterator over our indices.
"""
return self._iter()
|
shape2tuple
|
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager, nullcontext
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    # combined getter/setter for the module-level flag: calling with no args reads it,
    # calling with a bool writes it; the (possibly updated) value is always returned
    if flag is not None:
        _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # downgrade to a warning when errors are suppressed explicitly (err=False)
    # or globally (err is None and the module-level ignore flag is set)
    if err is False or (err is None and ignore_errors()):
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # always restore the prior setting, even if the managed block raised
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Deprecated shim kept for backwards compatibility; use
    openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # temporarily swap in a minimal formatter so only the message line is shown
    old_format = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        # restore the global formatter even if warnings are configured to raise
        warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated and then removed in numpy 2.0
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined; if value is scalar assign it to array of shape,
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    # Affine scaling cannot be used with scalers/adders
    if ref0 is not None or ref is not None:
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        if ref is None:
            ref = 1.0
        if ref0 is None:
            ref0 = 0.0
        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        if scaler is None:
            scaler = 1.0
        if adder is None:
            adder = 0.0
    # normalize both to float or flat ndarray for internal use
    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # the env var overrides whatever optimizer the test asked for
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    from unittest.mock import Mock
    # pyoptsparse may not be installed at all; any failure leaves OPT/OPTIMIZER as None
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # requested optimizer unavailable; optionally fall back to SLSQP
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
            else:
                if fallback and isinstance(opt, Mock):
                    try:
                        opt = OPT('SLSQP')
                        OPTIMIZER = 'SLSQP'
                    except Exception:
                        pass
    except Exception:
        pass
    # a Mock here means pyoptsparse was only partially importable; treat as not found
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    # Convert adder to ndarray/float as necessary
    # NOTE: the order of these elif branches matters; ndarray/Iterable must be
    # checked before the scalar '==' comparisons below.
    if isinstance(values, np.ndarray):
        if flatten:
            values = values.flatten()
    elif not isinstance(values, str) \
            and isinstance(values, Iterable):
        values = np.asarray(values, dtype=float)
        if flatten:
            values = values.flatten()
    elif values is None:
        values = val_if_none
    # clamp +/- infinity to the finite INF_BOUND sentinel used internally
    elif values == float('inf'):
        values = INF_BOUND
    elif values == -float('inf'):
        values = -INF_BOUND
    elif isinstance(values, numbers.Number):
        values = float(values)
    else:
        raise TypeError('Expected values of {0} to be an Iterable of '
                        'numeric values, or a scalar numeric value. '
                        'Got {1} instead.'.format(name, values))
    return values
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """
    def __contains__(self, name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # membership is unconditionally true, no matter what is asked for
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # emit the full path first, then peel off one trailing component at a time
    while parts:
        yield delim.join(parts)
        parts = parts[:-1]
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # fast paths: match-everything wildcard and exact-name matches need no globbing
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return list(filter(lambda nm: fnmatchcase(nm, pattern), var_list))
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # quote first, then left-justify; ljust is a no-op if already wide enough
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    return text.ljust(pad_num)
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception as err:
        # optionally swallow the failure so the captured output is still returned
        if not ignore_exception:
            raise err
    finally:
        sys.stdout = saved_stdout
    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # restore stdout no matter how run_driver exits
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    The `as`-clause of the `with`-statement gives the current print options.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved_opts = np.get_printoptions()
    # silently drop kwargs this numpy version doesn't recognize
    # (e.g. numpy <= 1.13 has no 'floatmode' option)
    valid_kwds = {key: val for key, val in kwds.items() if key in saved_opts}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved_opts)
def _nothing():
    # single-shot generator used to build a do-nothing context manager
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    # contextlib.nullcontext is the stdlib version of this exact pattern;
    # its __enter__ returns None by default, matching the old generator-based impl
    return nullcontext()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.
    Returns
    -------
    str
        The string with white-space removed.
    """
    # select the regex for the requested trim mode, then apply it once
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # neither flag set: strip ALL whitespace, not just the ends
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# characters that are not legal in python identifiers
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
# translation table mapping each bad character to an underscore
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # single C-level pass over the string via str.translate
    return s.translate(_transtab)
# iterable container types whose elements are serialized recursively
_container_classes = (list, tuple, set)
def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    elif isinstance(o, dict):
        # keys and values are converted separately; keys may not be containers
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [make_serializable(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so bools are returned here unchanged
        # and the isinstance(o, bool) branch below can never be reached -- confirm
        # whether bools were intended to be converted to str.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        try:
            # prefer an object's own JSON representation when it provides one
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    else:
        return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        # unwrap numpy scalar to its native python equivalent
        return o.item()
    if hasattr(o, '__dict__'):
        # arbitrary objects are keyed by their class name
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    elif isinstance(o, dict):
        # keys and values are converted separately; keys may not be containers
        s_key = [make_serializable_key(item) for item in o.keys()]
        s_val = [default_noraise(item) for item in o.values()]
        return dict(zip(s_key, s_val))
    elif isinstance(o, np.ndarray):
        return o.tolist()
    elif isinstance(o, np.number):
        return o.item()
    elif isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so bools are returned here unchanged
        # and the isinstance(o, bool) branch below is unreachable -- confirm intent.
        return o
    elif isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    elif hasattr(o, '__dict__'):
        return o.__class__.__name__
    elif o is None:
        return None
    else:
        # never raise; report the unconvertible type instead
        return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    # falsy input (None, '', empty container) yields an empty set
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # validate every element before returning anything
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                raise TypeError(
                    f"Items in tags should be of type string, but type '{typ}' was found.")
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # excludes take precedence over includes
    if excludes is not None and any(fnmatchcase(name, pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(fnmatchcase(name, pat) for pat in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # when the names differ, a pattern may hit either the absolute or the promoted name
    names = (name,) if name == prom_name else (name, prom_name)

    def _hits(pattern):
        return any(fnmatchcase(nm, pattern) for nm in names)

    # excludes take precedence over includes
    if excludes is not None and any(_hits(pat) for pat in excludes):
        return False
    if includes is None:
        return True
    return any(_hits(pat) for pat in includes)
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # unset variables default to '0', which is falsey; comparison is case-insensitive
    raw = os.environ.get(env_var, '0')
    return raw.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if pathnames:
        npaths = len(pathnames)
        splits = [p.split('.') for p in pathnames]
        # only need to compare up to the length of the shortest pathname
        minlen = np.min([len(s) for s in splits])
        for common_loc in range(minlen):
            p0 = splits[0][common_loc]
            for i in range(1, npaths):
                # mismatch at this depth: common_loc is the first NON-common index
                if p0 != splits[i][common_loc]:
                    break
            else:
                # all paths agree at this depth; advance to the next component
                continue
            # propagate the inner break: stop scanning, keep common_loc as-is
            break
        else:
            # outer loop ran to completion with no mismatch, so the entire
            # shortest path is common; bump past the last matched index
            common_loc += 1
        return '.'.join(splits[0][:common_loc])
    return ''
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # plain slice: build the index range directly, which uses less memory
        # than materializing the full arange and then slicing it
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
    # general case (e.g. tuple of slices): index a full arange of the array's shape
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    # dict() consumes the (name, meta) pairs directly from the item iterator
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # if either level has no indices, the other level's indices apply unchanged
    if parent_src_inds is None:
        return my_src_inds
    elif my_src_inds is None:
        return parent_src_inds
    # NOTE(review): both index args appear to be Indexer objects (they expose
    # _flat_src, flat(), shaped_array() and are callable) -- confirm against
    # openmdao.utils.indexer before relying on this.
    if my_src_inds._flat_src:
        # flat case: index directly into the parent's flattened source array
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    else:
        # non-flat case: reshape parent's source to this level's expected shape first
        return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
# MASKED: shape2tuple function (lines 1094-1112)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # BUG FIX: was `tgt in abs2prom['input'][tgt]`, which performed a substring test on
    # the promoted-name string (and could KeyError if tgt was absent) instead of a
    # membership test on the 'input' dict itself.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection is explicit; find the group
            # where the manual connection was declared
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        saved_path = sys.path
        # search WINGHOME as well, without mutating the saved list
        sys.path = sys.path[:] + [os.environ['WINGHOME']]
        try:
            import wingdbstub  # noqa: F401  (imported for its side effect)
        finally:
            sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector.
    _iter : method
        The iteration method used.
    """
    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0
        # determine whether vname is an output or an input of this system
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()
        if abs2meta[vname]['distributed']:
            # distributed variable: compute this rank's sub-range of the full size
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            # sizes of preceding variables on this rank give the vector offset
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
            self._iter = self._dist_iter
            # sum of this variable's sizes on lower ranks gives our starting index
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)
    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds
    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end
        # iterate over the FULL distributed size; non-local indices yield None
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None
    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        # _iter was bound in __init__ to either _serial_iter or _dist_iter
        return self._iter()
|
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple (a scalar becomes a 1-tuple; None passes through).
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
| 1,094
| 1,112
|
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
# Module-level flag; read and written only through the ignore_errors() accessor.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    # Warn (instead of raising) when errors are being ignored globally, or
    # when the caller explicitly asked for a warning with err=False.
    warn_only = (err is None and ignore_errors()) or err is False
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        If not None, set ignore_errors to this value.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Always restore the prior setting, even if the body raised.
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    # Swap in the compact formatter just for this one warning, then restore.
    saved_formatter = warnings.formatwarning
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_formatter
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # FIX: np.product was deprecated and removed in NumPy 2.0; np.prod
            # is the supported equivalent.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # Affine scaling cannot be used with scalers/adders
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # The env var overrides the requested optimizer (e.g. to force SLSQP on CI).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force

    from unittest.mock import Mock

    try:
        from pyoptsparse import OPT  # rebinds OPT when pyoptsparse is importable

        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer unavailable; optionally fall back to SLSQP.
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Some environments stub pyoptsparse with Mock objects; treat a
            # Mock "optimizer" the same as a missing one and try SLSQP.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        pass  # pyoptsparse itself is not installed

    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None

    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)

    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    if isinstance(values, Iterable) and not isinstance(values, str):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr
    if values is None:
        return val_if_none
    # Clamp infinities to the finite bound used internally.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary that always reports __contains__(name) to be True.
    """

    def __contains__(self, name):
        """
        Return if the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        # Membership is unconditional; the argument is deliberately ignored.
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    # Yield progressively shorter prefixes: full path first, root last.
    for n_parts in reversed(range(1, len(parts) + 1)):
        yield delim.join(parts[:n_parts])
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # Fast paths: match-all wildcard, then exact-name match.
    if pattern == '*':
        return var_list
    if pattern in var_list:
        return [pattern]
    return [vname for vname in var_list if fnmatchcase(vname, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Note: avoid shadowing this function's own name with a local variable.
    text = "'{}'".format(name) if quotes else '{}'.format(name)
    if len(text) < pad_num:
        return text + ' ' * (pad_num - len(text))
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # Restore stdout even if run_model raised.
        sys.stdout = saved_stdout

    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # Restore stdout even if run_driver raised.
        sys.stdout = saved_stdout

    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()

    # Drop keyword args this numpy version doesn't recognize
    # (e.g. numpy <=1.13 has no 'floatmode' option).
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}

    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Single-shot generator backing the no-op context manager.
    yield None


def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # Select the regex for the requested trim mode, then apply it once.
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not valid in a python identifier, mapped to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))


def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    # One C-level pass replaces every offending character with '_'.
    return s.translate(_transtab)
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): make_serializable(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    # NOTE(review): bools are ints in Python, so True/False are returned here
    # as-is and never reach the str() branch below -- preserved deliberately.
    if isinstance(o, (str, float, int)):
        return o
    if isinstance(o, (bool, complex)):
        return str(o)
    if hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # Arbitrary objects key by their class name.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
    """
    Try to convert some extra types during JSON serialization.

    This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
    attempt to convert values if possible, but if no conversion works, will return
    'unserializable object (<type>)' instead of raising a TypeError.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [default_noraise(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): default_noraise(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    if isinstance(o, (str, float, int)):
        return o
    if isinstance(o, (bool, complex)):
        return str(o)
    if hasattr(o, '__dict__'):
        return o.__class__.__name__
    if o is None:
        return None
    # Last resort: report the type instead of raising TypeError.
    return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate every element before converting.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                msg = f"Items in tags should be of type string, but type '{typ}' was found."
                raise TypeError(msg)
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Excludes take precedence over includes.
    if excludes is not None and any(fnmatchcase(name, p) for p in excludes):
        return False

    if includes is None:
        return True
    return any(fnmatchcase(name, p) for p in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # Only test the promoted name separately when it differs from the
    # absolute name.
    names = (name,) if name == prom_name else (name, prom_name)

    # Excludes take precedence over includes.
    if excludes is not None:
        for pattern in excludes:
            if any(fnmatchcase(n, pattern) for n in names):
                return False

    if includes is None:
        return True
    for pattern in includes:
        if any(fnmatchcase(n, pattern) for n in names):
            return True
    return False
# Lower-cased values treated as "false" for environment variables.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable defaults to '0', i.e. falsey.
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]

    if not pathnames:
        return ''

    splits = [p.split('.') for p in pathnames]

    # Walk component-by-component; zip stops at the shortest path.
    prefix_len = 0
    for components in zip(*splits):
        if any(c != components[0] for c in components[1:]):
            break
        prefix_len += 1

    return '.'.join(splits[0][:prefix_len])
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array
    arr_size : int
        Size of output array
    arr_shape : tuple
        Tuple of output array shape

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # Simple slice: materialize only the requested range (less memory).
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    # General case: build the full index array, then apply the slicer tuple.
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # If either set of indices is missing, the other applies unchanged.
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple
        The given shape.

    Returns
    -------
    tuple
        The shape as a tuple (a scalar becomes a 1-tuple; None passes through).
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name), or
        (None, None, None) if no owning group is found.
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # FIX: original code tested `tgt in abs2prom['input'][tgt]`, which indexes
    # by tgt (raising KeyError when tgt is absent) and then performs a
    # substring test against the promoted name.  The intent is a membership
    # test on the input mapping itself.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so the connection must be explicit;
            # search for the group where the manual connection was declared.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os
        # Temporarily extend sys.path with the Wing IDE install directory so
        # that wingdbstub (the Wing debugger hook) can be found, then restore
        # the original path whether or not the import succeeds.
        save = sys.path
        new = sys.path[:] + [os.environ['WINGHOME']]  # assumes WINGHOME is set -- KeyError otherwise
        sys.path = new
        try:
            import wingdbstub
        finally:
            sys.path = save
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Look the variable up as an output first; fall back to treating it
        # as an input if it isn't found among the outputs.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # Distributed variable: derive this rank's window into the full
            # distributed size from the (rank x variable) sizes table.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # Serial variable: indices form a simple contiguous range.
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        # Iterate over the FULL distributed size; yield a local vector index
        # when i falls inside this rank's [start, end) window, else None.
        for i in range(self._dist_size):
            if i >= start and i < end:
                yield i - start + self._offset
            else:
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
|
create_network_interfaces
|
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Use the explicitly supplied ApiClient when given; otherwise fall
        # back to the client stored on the shared Configuration object,
        # creating one lazily if none exists yet.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
# MASKED: create_network_interfaces function (lines 43-67)
def create_network_interfaces_with_http_info(self, **kwargs):
"""
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_network_interfaces(self, **kwargs):
"""
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.delete_network_interfaces_with_http_info(**kwargs)
return data
def delete_network_interfaces_with_http_info(self, **kwargs):
"""
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_network_interfaces(self, **kwargs):
"""
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.list_network_interfaces_with_http_info(**kwargs)
return data
def list_network_interfaces_with_http_info(self, **kwargs):
"""
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_network_interfaces(self, **kwargs):
"""
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.update_network_interfaces_with_http_info(**kwargs)
return data
def update_network_interfaces_with_http_info(self, **kwargs):
"""
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
def create_network_interfaces(self, **kwargs):
    """
    Create a new network interface.

    Synchronous by default; supply a ``callback`` callable to run the
    request asynchronously, in which case the request thread is returned
    instead of the parsed payload.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param list[str] names: A comma-separated list of resource names. This
        cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: The attribute map used to
        create the network interface
    :return: NetworkInterfaceResponse
             If the method is called asynchronously, returns the request
             thread.
    """
    # Callers of this wrapper want the payload only, not the
    # (data, status, headers) tuple of the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    # Both the callback and non-callback paths of the generated original
    # return this exact same call, so one call suffices.
    response = self.create_network_interfaces_with_http_info(**kwargs)
    return response
| 43
| 67
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
    """
    Client for the ``/1.3/network-interfaces`` FlashBlade REST endpoint.

    Public contract preserved from the generated original: every method
    accepts only keyword arguments and returns whatever
    ``ApiClient.call_api`` produces — the parsed payload by default, or the
    request thread when a ``callback`` keyword is supplied.

    NOTE: originally auto generated by the swagger code generator program
    (https://github.com/swagger-api/swagger-codegen); deduplicated by hand.
    """

    # The single endpoint served by this API class.
    _ENDPOINT = '/1.3/network-interfaces'
    # Keywords accepted by every method in addition to its own parameters.
    _CONTROL_PARAMS = ('callback', '_return_http_data_only',
                      '_preload_content', '_request_timeout')

    def __init__(self, api_client=None):
        """
        :param api_client: explicit ApiClient to use; when omitted, falls
            back to (and lazily creates) the client held by the shared
            Configuration singleton.
        """
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def _request(self, http_method, method_name, query_names, kwargs,
                 response_type, body_key=None):
        """
        Validate ``kwargs`` and dispatch one request to the endpoint.

        This is the shared body of every ``*_with_http_info`` method, which
        previously appeared with only the verb, the accepted keywords and
        the response type varying. Iterating ``kwargs`` directly also drops
        the py2-era ``six.iteritems`` call.

        :param str http_method: HTTP verb ('GET', 'POST', 'PATCH', 'DELETE')
        :param str method_name: public method name used in error messages
        :param list[str] query_names: keywords forwarded as query parameters
            (in this order)
        :param dict kwargs: caller-supplied keyword arguments
        :param response_type: swagger response model name, or None
        :param str body_key: keyword whose value becomes the request body
        :raises TypeError: on any unexpected keyword argument
        """
        allowed = set(query_names) | set(self._CONTROL_PARAMS)
        if body_key:
            allowed.add(body_key)
        for key in kwargs:
            if key not in allowed:
                # Same message format as the generated original.
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )

        query_params = []
        collection_formats = {}
        for name in query_names:
            if name in kwargs:
                query_params.append((name, kwargs[name]))
        if 'names' in kwargs:
            # 'names' is a list serialized as a comma-separated value.
            collection_formats['names'] = 'csv'

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json']),
            'Content-Type': self.api_client.select_header_content_type(
                ['application/json']),
        }

        return self.api_client.call_api(
            self._ENDPOINT, http_method,
            {},                       # no path parameters on this endpoint
            query_params,
            header_params,
            body=kwargs.get(body_key) if body_key else None,
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=['AuthTokenHeader'],
            callback=kwargs.get('callback'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats=collection_formats)

    def create_network_interfaces(self, **kwargs):
        """
        Create a new network interface.

        Synchronous by default; pass a ``callback`` callable to make the
        request asynchronously (the request thread is returned instead).

        :param callback function: The callback function for asynchronous
            request. (optional)
        :param list[str] names: A comma-separated list of resource names.
            This cannot be provided together with the ids query parameters.
        :param NetworkInterface network_interface: The attribute map used to
            create the network interface
        :return: NetworkInterfaceResponse
                 If the method is called asynchronously, returns the request
                 thread.
        """
        # Callers of this wrapper want the payload only.
        kwargs['_return_http_data_only'] = True
        return self.create_network_interfaces_with_http_info(**kwargs)

    def create_network_interfaces_with_http_info(self, **kwargs):
        """
        Create a new network interface, with full HTTP info unless
        ``_return_http_data_only`` is set in ``kwargs``.

        Same parameters and return contract as
        :meth:`create_network_interfaces`.
        """
        return self._request('POST', 'create_network_interfaces',
                             ['names'], kwargs, 'NetworkInterfaceResponse',
                             body_key='network_interface')

    def delete_network_interfaces(self, **kwargs):
        """
        Delete a network interface.

        Synchronous by default; pass a ``callback`` callable to make the
        request asynchronously (the request thread is returned instead).

        :param callback function: The callback function for asynchronous
            request. (optional)
        :param list[str] names: A comma-separated list of resource names.
            This cannot be provided together with the ids query parameters.
        :return: None
                 If the method is called asynchronously, returns the request
                 thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.delete_network_interfaces_with_http_info(**kwargs)

    def delete_network_interfaces_with_http_info(self, **kwargs):
        """
        Delete a network interface, with full HTTP info unless
        ``_return_http_data_only`` is set in ``kwargs``.

        Same parameters and return contract as
        :meth:`delete_network_interfaces`.
        """
        return self._request('DELETE', 'delete_network_interfaces',
                             ['names'], kwargs, None)

    def list_network_interfaces(self, **kwargs):
        """
        List network interfaces.

        Synchronous by default; pass a ``callback`` callable to make the
        request asynchronously (the request thread is returned instead).

        :param callback function: The callback function for asynchronous
            request. (optional)
        :param list[str] names: A comma-separated list of resource names.
            This cannot be provided together with the ids query parameters.
        :param str filter: The filter to be used for query.
        :param str sort: Sort the response by the specified fields (in
            descending order if '-' is appended to the field name).
        :param int start: The offset of the first resource to return from a
            collection.
        :param int limit: limit, should be >= 0
        :param str token: An opaque token used to iterate over a collection.
            The token to use on the next request is returned in the
            `continuation_token` field of the result.
        :return: NetworkInterfaceResponse
                 If the method is called asynchronously, returns the request
                 thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.list_network_interfaces_with_http_info(**kwargs)

    def list_network_interfaces_with_http_info(self, **kwargs):
        """
        List network interfaces, with full HTTP info unless
        ``_return_http_data_only`` is set in ``kwargs``.

        Same parameters and return contract as
        :meth:`list_network_interfaces`.
        """
        return self._request(
            'GET', 'list_network_interfaces',
            ['names', 'filter', 'sort', 'start', 'limit', 'token'],
            kwargs, 'NetworkInterfaceResponse')

    def update_network_interfaces(self, **kwargs):
        """
        Update an existing network interface.

        Synchronous by default; pass a ``callback`` callable to make the
        request asynchronously (the request thread is returned instead).

        :param callback function: The callback function for asynchronous
            request. (optional)
        :param list[str] names: A comma-separated list of resource names.
            This cannot be provided together with the ids query parameters.
        :param NetworkInterface network_interface: the attribute map used to
            update the network interface
        :return: NetworkInterfaceResponse
                 If the method is called asynchronously, returns the request
                 thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.update_network_interfaces_with_http_info(**kwargs)

    def update_network_interfaces_with_http_info(self, **kwargs):
        """
        Update an existing network interface, with full HTTP info unless
        ``_return_http_data_only`` is set in ``kwargs``.

        Same parameters and return contract as
        :meth:`update_network_interfaces`.
        """
        return self._request('PATCH', 'update_network_interfaces',
                             ['names'], kwargs, 'NetworkInterfaceResponse',
                             body_key='network_interface')
|
create_network_interfaces_with_http_info
|
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_network_interfaces(self, **kwargs):
"""
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.create_network_interfaces_with_http_info(**kwargs)
return data
# MASKED: create_network_interfaces_with_http_info function (lines 69-147)
def delete_network_interfaces(self, **kwargs):
    """
    Delete a network interface.

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper want only the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async (callback) and sync paths reduce to the same delegate call.
    return self.delete_network_interfaces_with_http_info(**kwargs)
def delete_network_interfaces_with_http_info(self, **kwargs):
    """
    Delete a network interface (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_network_interfaces" % key
            )
    params = dict(kwargs)

    # 'names' travels as a csv-joined query parameter.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'DELETE',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_network_interfaces(self, **kwargs):
    """
    List network interfaces.

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param str filter: The filter to be used for query.
    :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
    :param int start: The offset of the first resource to return from a collection.
    :param int limit: limit, should be >= 0
    :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper want only the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async (callback) and sync paths reduce to the same delegate call.
    return self.list_network_interfaces_with_http_info(**kwargs)
def list_network_interfaces_with_http_info(self, **kwargs):
    """
    List network interfaces (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param str filter: The filter to be used for query.
    :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
    :param int start: The offset of the first resource to return from a collection.
    :param int limit: limit, should be >= 0
    :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'filter', 'sort', 'start', 'limit', 'token',
                'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_network_interfaces" % key
            )
    params = dict(kwargs)

    # Build query parameters in the endpoint's canonical order;
    # 'names' is csv-joined, the rest pass through as given.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    for field in ('filter', 'sort', 'start', 'limit', 'token'):
        if field in params:
            query_params.append((field, params[field]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='NetworkInterfaceResponse',
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_network_interfaces(self, **kwargs):
    """
    Update an existing network interface.

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: the attribute map used to update the network interface
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper want only the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async (callback) and sync paths reduce to the same delegate call.
    return self.update_network_interfaces_with_http_info(**kwargs)
def update_network_interfaces_with_http_info(self, **kwargs):
    """
    Update an existing network interface (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: the attribute map used to update the network interface
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'network_interface', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_network_interfaces" % key
            )
    params = dict(kwargs)

    # 'names' travels as a csv-joined query parameter.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    # The updated attribute map travels in the request body.
    body_params = params.get('network_interface')

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'PATCH',
        {},
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='NetworkInterfaceResponse',
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
|
def create_network_interfaces_with_http_info(self, **kwargs):
    """
    Create a new network interface (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: The attribute map used to create the network interface
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'network_interface', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_network_interfaces" % key
            )
    params = dict(kwargs)

    # 'names' travels as a csv-joined query parameter.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    # The interface definition travels in the request body.
    body_params = params.get('network_interface')

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'POST',
        {},
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='NetworkInterfaceResponse',
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 69
| 147
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind the HTTP client used for every request of this API group.

    :param api_client: explicit client to use; when falsy, the shared
        default client from the global ``Configuration`` is used
        (and lazily created on first need).
    """
    # The default Configuration is always instantiated, matching the
    # generated code's behavior even when an explicit client is given.
    default_config = Configuration()
    if not api_client:
        if not default_config.api_client:
            default_config.api_client = ApiClient()
        api_client = default_config.api_client
    self.api_client = api_client
def create_network_interfaces(self, **kwargs):
    """
    Create a new network interface.

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: The attribute map used to create the network interface
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper want only the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async (callback) and sync paths reduce to the same delegate call.
    return self.create_network_interfaces_with_http_info(**kwargs)
def create_network_interfaces_with_http_info(self, **kwargs):
    """
    Create a new network interface (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: The attribute map used to create the network interface
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'network_interface', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_network_interfaces" % key
            )
    params = dict(kwargs)

    # 'names' travels as a csv-joined query parameter.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    # The interface definition travels in the request body.
    body_params = params.get('network_interface')

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'POST',
        {},
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='NetworkInterfaceResponse',
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_network_interfaces(self, **kwargs):
    """
    Delete a network interface.

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper want only the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async (callback) and sync paths reduce to the same delegate call.
    return self.delete_network_interfaces_with_http_info(**kwargs)
def delete_network_interfaces_with_http_info(self, **kwargs):
    """
    Delete a network interface (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_network_interfaces" % key
            )
    params = dict(kwargs)

    # 'names' travels as a csv-joined query parameter.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'DELETE',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_network_interfaces(self, **kwargs):
    """
    List network interfaces.

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param str filter: The filter to be used for query.
    :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
    :param int start: The offset of the first resource to return from a collection.
    :param int limit: limit, should be >= 0
    :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper want only the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async (callback) and sync paths reduce to the same delegate call.
    return self.list_network_interfaces_with_http_info(**kwargs)
def list_network_interfaces_with_http_info(self, **kwargs):
    """
    List network interfaces (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param str filter: The filter to be used for query.
    :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
    :param int start: The offset of the first resource to return from a collection.
    :param int limit: limit, should be >= 0
    :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'filter', 'sort', 'start', 'limit', 'token',
                'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_network_interfaces" % key
            )
    params = dict(kwargs)

    # Build query parameters in the endpoint's canonical order;
    # 'names' is csv-joined, the rest pass through as given.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    for field in ('filter', 'sort', 'start', 'limit', 'token'):
        if field in params:
            query_params.append((field, params[field]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='NetworkInterfaceResponse',
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_network_interfaces(self, **kwargs):
    """
    Update an existing network interface.

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: the attribute map used to update the network interface
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper want only the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async (callback) and sync paths reduce to the same delegate call.
    return self.update_network_interfaces_with_http_info(**kwargs)
def update_network_interfaces_with_http_info(self, **kwargs):
    """
    Update an existing network interface (full HTTP info variant).

    Synchronous by default; pass a ``callback`` keyword to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: the attribute map used to update the network interface
    :return: NetworkInterfaceResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('names', 'network_interface', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_network_interfaces" % key
            )
    params = dict(kwargs)

    # 'names' travels as a csv-joined query parameter.
    query_params = []
    collection_formats = {}
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    # The updated attribute map travels in the request body.
    body_params = params.get('network_interface')

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/1.3/network-interfaces', 'PATCH',
        {},
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='NetworkInterfaceResponse',
        auth_settings=['AuthTokenHeader'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
|
generate_DeepConvLSTM_model
|
Generate a model with convolution and LSTM layers.
See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
lstm_dims : list of ints
number of hidden nodes for each LSTM layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
|
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM # Comment on HPC
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
        x_shape, number_of_classes, number_of_models=5, metrics=None,
        model_type=None,
        cnn_min_layers=5, cnn_max_layers=10,
        cnn_min_filters=25, cnn_max_filters=100,
        cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
        deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
        deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
        deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
        deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
        low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
    """
    Generate one or more untrained Keras models with random hyperparameters.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    number_of_classes : int
        Number of classes for the classification task
    number_of_models : int
        Number of models to generate
    metrics : list, optional
        Metrics to calculate on the validation set (default: ['accuracy']).
        See https://keras.io/metrics/ for possible values.
    model_type : str, optional
        Type of model to build: 'CNN' or 'DeepConvLSTM'.
        Default option None picks randomly between the two for each model.
    cnn_min_layers, cnn_max_layers : int
        range for the number of Conv layers in a CNN model
    cnn_min_filters, cnn_max_filters : int
        range for the number of filters per Conv layer in a CNN model
    cnn_min_fc_nodes, cnn_max_fc_nodes : int
        range for the number of hidden nodes of the Dense layer in a CNN model
    deepconvlstm_min_conv_layers, deepconvlstm_max_conv_layers : int
        range for the number of Conv layers in a DeepConvLSTM model
    deepconvlstm_min_conv_filters, deepconvlstm_max_conv_filters : int
        range for the number of filters per Conv layer in a DeepConvLSTM model
    deepconvlstm_min_lstm_layers, deepconvlstm_max_lstm_layers : int
        range for the number of LSTM layers in a DeepConvLSTM model
    deepconvlstm_min_lstm_dims, deepconvlstm_max_lstm_dims : int
        range for the number of hidden nodes per LSTM layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    models : list
        List of (compiled model, hyperparameters, model_type) tuples

    Raises
    ------
    ValueError
        If `model_type` is neither None, 'CNN' nor 'DeepConvLSTM'.
    """
    # Avoid the mutable-default-argument pitfall for `metrics`.
    if metrics is None:
        metrics = ['accuracy']
    models = []
    for _ in range(number_of_models):
        # Pick the architecture: coin flip unless the caller fixed it.
        if model_type is None:
            current_model_type = 'CNN' if np.random.random() < 0.5 \
                else 'DeepConvLSTM'
        else:
            current_model_type = model_type
        if current_model_type == 'CNN':
            generate_model = generate_CNN_model  # generate_model is a function
            hyperparameters = generate_CNN_hyperparameter_set(
                min_layers=cnn_min_layers, max_layers=cnn_max_layers,
                min_filters=cnn_min_filters, max_filters=cnn_max_filters,
                min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        elif current_model_type == 'DeepConvLSTM':
            generate_model = generate_DeepConvLSTM_model
            hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
                min_conv_layers=deepconvlstm_min_conv_layers,
                max_conv_layers=deepconvlstm_max_conv_layers,
                min_conv_filters=deepconvlstm_min_conv_filters,
                max_conv_filters=deepconvlstm_max_conv_filters,
                min_lstm_layers=deepconvlstm_min_lstm_layers,
                max_lstm_layers=deepconvlstm_max_lstm_layers,
                min_lstm_dims=deepconvlstm_min_lstm_dims,
                max_lstm_dims=deepconvlstm_max_lstm_dims,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        else:
            # Previously an unknown model_type fell through to a confusing
            # NameError; fail loudly with the actual problem instead.
            raise ValueError(
                "model_type must be None, 'CNN' or 'DeepConvLSTM', "
                "got %r" % model_type)
        models.append(
            (generate_model(x_shape, number_of_classes, metrics=metrics,
                            **hyperparameters),
             hyperparameters, current_model_type))
    return models
# MASKED: generate_DeepConvLSTM_model function (lines 140-225)
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
                       learning_rate=0.01, regularization_rate=0.01,
                       metrics=['accuracy']):
    """Build and compile a 1D-convolutional neural network.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for the classification task
    filters : list of ints
        number of filters for each convolutional layer
    fc_hidden_nodes : int
        number of hidden nodes for the hidden dense layer
    learning_rate : float
        learning rate for the Adam optimizer
    regularization_rate : float
        l2 regularization rate for conv and dense kernels
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    n_timesteps = x_shape[1]   # samples per time series
    n_channels = x_shape[2]    # channels per sample
    init = 'lecun_uniform'     # kernel weight initialization

    model = Sequential()
    model.add(BatchNormalization(input_shape=(n_timesteps, n_channels)))
    # One Conv1D -> BatchNorm -> ReLU block per entry in `filters`.
    for n_filters in filters:
        model.add(Convolution1D(n_filters, kernel_size=3, padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=init))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Flatten())
    # Fully connected hidden layer followed by the classification head.
    model.add(Dense(units=fc_hidden_nodes,
                    kernel_regularizer=l2(regularization_rate),
                    kernel_initializer=init))
    model.add(Activation('relu'))
    model.add(Dense(units=class_number, kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
                                    min_filters=10, max_filters=100,
                                    min_fc_nodes=10, max_fc_nodes=2000,
                                    low_lr=1, high_lr=4, low_reg=1,
                                    high_reg=4):
    """Generate a random hyperparameter set that defines a CNN model.

    Parameters
    ----------
    min_layers, max_layers : int
        range for the number of Conv layers
    min_filters, max_filters : int
        range for the number of filters per Conv layer
    min_fc_nodes, max_fc_nodes : int
        range for the number of hidden nodes of the Dense layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        parameters for a CNN model: 'learning_rate',
        'regularization_rate', 'filters', 'fc_hidden_nodes'
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_layers = np.random.randint(min_layers, max_layers + 1)
    # Return plain Python types (list / int) instead of numpy ndarray /
    # numpy scalar, consistent with generate_DeepConvLSTM_hyperparameter_set.
    hyperparameters['filters'] = np.random.randint(
        min_filters, max_filters + 1, number_of_layers).tolist()
    hyperparameters['fc_hidden_nodes'] = int(np.random.randint(
        min_fc_nodes, max_fc_nodes + 1))
    return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
        min_conv_layers=1, max_conv_layers=10,
        min_conv_filters=10, max_conv_filters=100,
        min_lstm_layers=1, max_lstm_layers=5,
        min_lstm_dims=10, max_lstm_dims=100,
        low_lr=1, high_lr=4, low_reg=1, high_reg=4):
    """Generate a random hyperparameter set that defines a DeepConvLSTM model.

    Parameters
    ----------
    min_conv_layers, max_conv_layers : int
        range for the number of Conv layers
    min_conv_filters, max_conv_filters : int
        range for the number of filters per Conv layer
    min_lstm_layers, max_lstm_layers : int
        range for the number of LSTM layers
    min_lstm_dims, max_lstm_dims : int
        range for the number of hidden nodes per LSTM layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        hyperparameters for a DeepConvLSTM model: 'learning_rate',
        'regularization_rate', 'filters', 'lstm_dims'
    """
    params = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    # Draw the conv stack first, then the LSTM stack; keeping this order
    # preserves reproducibility under a fixed random seed.
    n_conv = np.random.randint(min_conv_layers, max_conv_layers + 1)
    params['filters'] = np.random.randint(
        min_conv_filters, max_conv_filters + 1, n_conv).tolist()
    n_lstm = np.random.randint(min_lstm_layers, max_lstm_layers + 1)
    params['lstm_dims'] = np.random.randint(
        min_lstm_dims, max_lstm_dims + 1, n_lstm).tolist()
    return params
def generate_base_hyper_parameter_set(
        low_lr=1,
        high_lr=4,
        low_reg=1,
        high_reg=4):
    """Return the hyperparameters shared by every model type.

    These are necessary for any model but sufficient for none; callers add
    their architecture-specific keys on top.

    Parameters
    ----------
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        dict with keys 'learning_rate' and 'regularization_rate'
    """
    return {
        'learning_rate': get_learning_rate(low_lr, high_lr),
        'regularization_rate': get_regularization(low_reg, high_reg),
    }
def get_learning_rate(low=1, high=4):
    """Return the learning rate.

    NOTE: random log-uniform sampling is currently disabled here; a fixed
    Adam-friendly rate of 0.001 is returned regardless of `low`/`high`.

    Parameters
    ----------
    low : float
        low bound (unused while sampling is disabled)
    high : float
        high bound (unused while sampling is disabled)

    Returns
    -------
    learning_rate : float
        learning rate
    """
    return 0.001
def get_regularization(low=1, high=4):
    """Sample a random regularization rate 10**(-n), n ~ Uniform(low, high).

    Parameters
    ----------
    low : float
        low bound of the exponent range
    high : float
        high bound of the exponent range

    Returns
    -------
    regularization_rate : float
        regularization rate
    """
    exponent = np.random.uniform(low, high)
    return 10 ** -exponent
|
def generate_DeepConvLSTM_model(
        x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
        regularization_rate=0.01, metrics=['accuracy']):
    """Build and compile a model with convolution and LSTM layers.

    See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for the classification task
    filters : list of ints
        number of filters for each convolutional layer
    lstm_dims : list of ints
        number of hidden nodes for each LSTM layer
    learning_rate : float
        learning rate for the Adam optimizer
    regularization_rate : float
        l2 regularization rate for the conv kernels
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    n_timesteps = x_shape[1]   # samples per time series
    n_channels = x_shape[2]    # channels per sample
    init = 'lecun_uniform'     # kernel weight initialization

    model = Sequential()
    model.add(BatchNormalization(input_shape=(n_timesteps, n_channels)))
    # Add a singleton depth axis so 2D convolutions can run per channel.
    model.add(Reshape(target_shape=(n_timesteps, n_channels, 1)))
    # One Conv2D -> BatchNorm -> ReLU block per entry in `filters`.
    for n_filters in filters:
        model.add(Convolution2D(n_filters, kernel_size=(3, 1), padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=init))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # Collapse back to 2D: each timestep now carries filters[-1] feature
    # maps for every original channel.
    model.add(Reshape(target_shape=(n_timesteps, filters[-1] * n_channels)))
    # Recurrent stack; CuDNNLSTM requires a GPU (swap for LSTM on HPC/CPU).
    for units in lstm_dims:
        model.add(CuDNNLSTM(units=units, return_sequences=True))
    model.add(Dropout(0.5))  # dropout before the dense layer
    # Pool the outputs of all timesteps, then classify the pooled vector.
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=class_number, kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
| 140
| 225
|
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM # Comment on HPC
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
        x_shape, number_of_classes, number_of_models=5, metrics=None,
        model_type=None,
        cnn_min_layers=5, cnn_max_layers=10,
        cnn_min_filters=25, cnn_max_filters=100,
        cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
        deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
        deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
        deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
        deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
        low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
    """
    Generate one or more untrained Keras models with random hyperparameters.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    number_of_classes : int
        Number of classes for the classification task
    number_of_models : int
        Number of models to generate
    metrics : list, optional
        Metrics to calculate on the validation set (default: ['accuracy']).
        See https://keras.io/metrics/ for possible values.
    model_type : str, optional
        Type of model to build: 'CNN' or 'DeepConvLSTM'.
        Default option None picks randomly between the two for each model.
    cnn_min_layers, cnn_max_layers : int
        range for the number of Conv layers in a CNN model
    cnn_min_filters, cnn_max_filters : int
        range for the number of filters per Conv layer in a CNN model
    cnn_min_fc_nodes, cnn_max_fc_nodes : int
        range for the number of hidden nodes of the Dense layer in a CNN model
    deepconvlstm_min_conv_layers, deepconvlstm_max_conv_layers : int
        range for the number of Conv layers in a DeepConvLSTM model
    deepconvlstm_min_conv_filters, deepconvlstm_max_conv_filters : int
        range for the number of filters per Conv layer in a DeepConvLSTM model
    deepconvlstm_min_lstm_layers, deepconvlstm_max_lstm_layers : int
        range for the number of LSTM layers in a DeepConvLSTM model
    deepconvlstm_min_lstm_dims, deepconvlstm_max_lstm_dims : int
        range for the number of hidden nodes per LSTM layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    models : list
        List of (compiled model, hyperparameters, model_type) tuples

    Raises
    ------
    ValueError
        If `model_type` is neither None, 'CNN' nor 'DeepConvLSTM'.
    """
    # Avoid the mutable-default-argument pitfall for `metrics`.
    if metrics is None:
        metrics = ['accuracy']
    models = []
    for _ in range(number_of_models):
        # Pick the architecture: coin flip unless the caller fixed it.
        if model_type is None:
            current_model_type = 'CNN' if np.random.random() < 0.5 \
                else 'DeepConvLSTM'
        else:
            current_model_type = model_type
        if current_model_type == 'CNN':
            generate_model = generate_CNN_model  # generate_model is a function
            hyperparameters = generate_CNN_hyperparameter_set(
                min_layers=cnn_min_layers, max_layers=cnn_max_layers,
                min_filters=cnn_min_filters, max_filters=cnn_max_filters,
                min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        elif current_model_type == 'DeepConvLSTM':
            generate_model = generate_DeepConvLSTM_model
            hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
                min_conv_layers=deepconvlstm_min_conv_layers,
                max_conv_layers=deepconvlstm_max_conv_layers,
                min_conv_filters=deepconvlstm_min_conv_filters,
                max_conv_filters=deepconvlstm_max_conv_filters,
                min_lstm_layers=deepconvlstm_min_lstm_layers,
                max_lstm_layers=deepconvlstm_max_lstm_layers,
                min_lstm_dims=deepconvlstm_min_lstm_dims,
                max_lstm_dims=deepconvlstm_max_lstm_dims,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        else:
            # Previously an unknown model_type fell through to a confusing
            # NameError; fail loudly with the actual problem instead.
            raise ValueError(
                "model_type must be None, 'CNN' or 'DeepConvLSTM', "
                "got %r" % model_type)
        models.append(
            (generate_model(x_shape, number_of_classes, metrics=metrics,
                            **hyperparameters),
             hyperparameters, current_model_type))
    return models
def generate_DeepConvLSTM_model(
        x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
        regularization_rate=0.01, metrics=['accuracy']):
    """Build and compile a model with convolution and LSTM layers.

    See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for the classification task
    filters : list of ints
        number of filters for each convolutional layer
    lstm_dims : list of ints
        number of hidden nodes for each LSTM layer
    learning_rate : float
        learning rate for the Adam optimizer
    regularization_rate : float
        l2 regularization rate for the conv kernels
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    n_timesteps = x_shape[1]   # samples per time series
    n_channels = x_shape[2]    # channels per sample
    init = 'lecun_uniform'     # kernel weight initialization

    model = Sequential()
    model.add(BatchNormalization(input_shape=(n_timesteps, n_channels)))
    # Add a singleton depth axis so 2D convolutions can run per channel.
    model.add(Reshape(target_shape=(n_timesteps, n_channels, 1)))
    # One Conv2D -> BatchNorm -> ReLU block per entry in `filters`.
    for n_filters in filters:
        model.add(Convolution2D(n_filters, kernel_size=(3, 1), padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=init))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # Collapse back to 2D: each timestep now carries filters[-1] feature
    # maps for every original channel.
    model.add(Reshape(target_shape=(n_timesteps, filters[-1] * n_channels)))
    # Recurrent stack; CuDNNLSTM requires a GPU (swap for LSTM on HPC/CPU).
    for units in lstm_dims:
        model.add(CuDNNLSTM(units=units, return_sequences=True))
    model.add(Dropout(0.5))  # dropout before the dense layer
    # Pool the outputs of all timesteps, then classify the pooled vector.
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=class_number, kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
                       learning_rate=0.01, regularization_rate=0.01,
                       metrics=['accuracy']):
    """Build and compile a 1D-convolutional neural network.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for the classification task
    filters : list of ints
        number of filters for each convolutional layer
    fc_hidden_nodes : int
        number of hidden nodes for the hidden dense layer
    learning_rate : float
        learning rate for the Adam optimizer
    regularization_rate : float
        l2 regularization rate for conv and dense kernels
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    n_timesteps = x_shape[1]   # samples per time series
    n_channels = x_shape[2]    # channels per sample
    init = 'lecun_uniform'     # kernel weight initialization

    model = Sequential()
    model.add(BatchNormalization(input_shape=(n_timesteps, n_channels)))
    # One Conv1D -> BatchNorm -> ReLU block per entry in `filters`.
    for n_filters in filters:
        model.add(Convolution1D(n_filters, kernel_size=3, padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=init))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Flatten())
    # Fully connected hidden layer followed by the classification head.
    model.add(Dense(units=fc_hidden_nodes,
                    kernel_regularizer=l2(regularization_rate),
                    kernel_initializer=init))
    model.add(Activation('relu'))
    model.add(Dense(units=class_number, kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
                                    min_filters=10, max_filters=100,
                                    min_fc_nodes=10, max_fc_nodes=2000,
                                    low_lr=1, high_lr=4, low_reg=1,
                                    high_reg=4):
    """Generate a random hyperparameter set that defines a CNN model.

    Parameters
    ----------
    min_layers, max_layers : int
        range for the number of Conv layers
    min_filters, max_filters : int
        range for the number of filters per Conv layer
    min_fc_nodes, max_fc_nodes : int
        range for the number of hidden nodes of the Dense layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        parameters for a CNN model: 'learning_rate',
        'regularization_rate', 'filters', 'fc_hidden_nodes'
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_layers = np.random.randint(min_layers, max_layers + 1)
    # Return plain Python types (list / int) instead of numpy ndarray /
    # numpy scalar, consistent with generate_DeepConvLSTM_hyperparameter_set.
    hyperparameters['filters'] = np.random.randint(
        min_filters, max_filters + 1, number_of_layers).tolist()
    hyperparameters['fc_hidden_nodes'] = int(np.random.randint(
        min_fc_nodes, max_fc_nodes + 1))
    return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
        min_conv_layers=1, max_conv_layers=10,
        min_conv_filters=10, max_conv_filters=100,
        min_lstm_layers=1, max_lstm_layers=5,
        min_lstm_dims=10, max_lstm_dims=100,
        low_lr=1, high_lr=4, low_reg=1, high_reg=4):
    """Generate a random hyperparameter set that defines a DeepConvLSTM model.

    Parameters
    ----------
    min_conv_layers, max_conv_layers : int
        range for the number of Conv layers
    min_conv_filters, max_conv_filters : int
        range for the number of filters per Conv layer
    min_lstm_layers, max_lstm_layers : int
        range for the number of LSTM layers
    min_lstm_dims, max_lstm_dims : int
        range for the number of hidden nodes per LSTM layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        hyperparameters for a DeepConvLSTM model: 'learning_rate',
        'regularization_rate', 'filters', 'lstm_dims'
    """
    params = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    # Draw the conv stack first, then the LSTM stack; keeping this order
    # preserves reproducibility under a fixed random seed.
    n_conv = np.random.randint(min_conv_layers, max_conv_layers + 1)
    params['filters'] = np.random.randint(
        min_conv_filters, max_conv_filters + 1, n_conv).tolist()
    n_lstm = np.random.randint(min_lstm_layers, max_lstm_layers + 1)
    params['lstm_dims'] = np.random.randint(
        min_lstm_dims, max_lstm_dims + 1, n_lstm).tolist()
    return params
def generate_base_hyper_parameter_set(
        low_lr=1,
        high_lr=4,
        low_reg=1,
        high_reg=4):
    """Return the hyperparameters shared by every model type.

    These are necessary for any model but sufficient for none; callers add
    their architecture-specific keys on top.

    Parameters
    ----------
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        dict with keys 'learning_rate' and 'regularization_rate'
    """
    return {
        'learning_rate': get_learning_rate(low_lr, high_lr),
        'regularization_rate': get_regularization(low_reg, high_reg),
    }
def get_learning_rate(low=1, high=4):
    """Return the learning rate.

    NOTE: random log-uniform sampling is currently disabled here; a fixed
    Adam-friendly rate of 0.001 is returned regardless of `low`/`high`.

    Parameters
    ----------
    low : float
        low bound (unused while sampling is disabled)
    high : float
        high bound (unused while sampling is disabled)

    Returns
    -------
    learning_rate : float
        learning rate
    """
    return 0.001
def get_regularization(low=1, high=4):
    """Sample a random regularization rate 10**(-n), n ~ Uniform(low, high).

    Parameters
    ----------
    low : float
        low bound of the exponent range
    high : float
        high bound of the exponent range

    Returns
    -------
    regularization_rate : float
        regularization rate
    """
    exponent = np.random.uniform(low, high)
    return 10 ** -exponent
|
generate_CNN_model
|
Generate a convolutional neural network (CNN) model.
The compiled Keras model is returned.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
fc_hidden_nodes : int
number of hidden nodes for the hidden dense layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
|
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM # Comment on HPC
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
        x_shape, number_of_classes, number_of_models=5, metrics=None,
        model_type=None,
        cnn_min_layers=5, cnn_max_layers=10,
        cnn_min_filters=25, cnn_max_filters=100,
        cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
        deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
        deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
        deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
        deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
        low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
    """
    Generate one or multiple untrained Keras models with random hyperparameters.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    number_of_classes : int
        Number of classes for classification task
    number_of_models : int
        Number of models to generate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.
    model_type : str, optional
        Type of model to build: 'CNN' or 'DeepConvLSTM'.
        Default option None generates both models.
    cnn_min_layers : int
        minimum of Conv layers in CNN model
    cnn_max_layers : int
        maximum of Conv layers in CNN model
    cnn_min_filters : int
        minimum number of filters per Conv layer in CNN model
    cnn_max_filters : int
        maximum number of filters per Conv layer in CNN model
    cnn_min_fc_nodes : int
        minimum number of hidden nodes per Dense layer in CNN model
    cnn_max_fc_nodes : int
        maximum number of hidden nodes per Dense layer in CNN model
    deepconvlstm_min_conv_layers : int
        minimum number of Conv layers in DeepConvLSTM model
    deepconvlstm_max_conv_layers : int
        maximum number of Conv layers in DeepConvLSTM model
    deepconvlstm_min_conv_filters : int
        minimum number of filters per Conv layer in DeepConvLSTM model
    deepconvlstm_max_conv_filters : int
        maximum number of filters per Conv layer in DeepConvLSTM model
    deepconvlstm_min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    deepconvlstm_max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
    deepconvlstm_min_lstm_dims : int
        minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
    deepconvlstm_max_lstm_dims : int
        maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)` (NOTE: sampling is
        currently disabled in `get_learning_rate`, which returns 0.001)
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    models : list
        List of (model, hyperparameters, model_type) tuples, where `model`
        is a compiled Keras model.

    Raises
    ------
    ValueError
        If `model_type` is not None, 'CNN' or 'DeepConvLSTM'.
    """
    if metrics is None:
        # Default created per call to avoid the mutable-default-argument
        # pitfall (a shared list would leak between calls).
        metrics = ['accuracy']
    models = []
    for _ in range(number_of_models):
        if model_type is None:  # no preference: pick a type at random
            current_model_type = 'CNN' if np.random.random() < 0.5 \
                else 'DeepConvLSTM'
        else:  # user-defined model choice
            current_model_type = model_type
        if current_model_type == 'CNN':
            generate_model = generate_CNN_model  # generate_model is a function
            hyperparameters = generate_CNN_hyperparameter_set(
                min_layers=cnn_min_layers, max_layers=cnn_max_layers,
                min_filters=cnn_min_filters, max_filters=cnn_max_filters,
                min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        elif current_model_type == 'DeepConvLSTM':
            generate_model = generate_DeepConvLSTM_model
            hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
                min_conv_layers=deepconvlstm_min_conv_layers,
                max_conv_layers=deepconvlstm_max_conv_layers,
                min_conv_filters=deepconvlstm_min_conv_filters,
                max_conv_filters=deepconvlstm_max_conv_filters,
                min_lstm_layers=deepconvlstm_min_lstm_layers,
                max_lstm_layers=deepconvlstm_max_lstm_layers,
                min_lstm_dims=deepconvlstm_min_lstm_dims,
                max_lstm_dims=deepconvlstm_max_lstm_dims,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        else:
            # Fail fast with a clear message instead of the NameError the
            # original code raised on an unrecognized model_type.
            raise ValueError(
                "Unknown model_type: %r (expected 'CNN' or 'DeepConvLSTM')"
                % current_model_type)
        models.append(
            (generate_model(x_shape, number_of_classes, metrics=metrics,
                            **hyperparameters),
             hyperparameters, current_model_type))
    return models
def generate_DeepConvLSTM_model(
        x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
        regularization_rate=0.01, metrics=None):
    """
    Generate a model with convolution and LSTM layers.
    See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    lstm_dims : list of ints
        number of hidden nodes for each LSTM layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    if metrics is None:
        # Avoid a mutable default argument; create the default per call.
        metrics = ['accuracy']
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    output_dim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()  # initialize model
    model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
    # reshape a 2 dimensional array per file/person/object into a
    # 3 dimensional array
    model.add(
        Reshape(target_shape=(dim_length, dim_channels, 1)))
    for filt in filters:
        # filt: number of filters used in a layer
        # filters: vector of filt values
        model.add(
            Convolution2D(filt, kernel_size=(3, 1), padding='same',
                          kernel_regularizer=l2(regularization_rate),
                          kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # reshape 3 dimensional array back into a 2 dimensional array,
    # but now with more depth as we have the filters for each channel
    model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
    for lstm_dim in lstm_dims:
        # CuDNNLSTM is the GPU/cuDNN fast path; swap in the plain LSTM
        # below when running on hardware without cuDNN (e.g. HPC CPU nodes):
        # model.add(LSTM(units=lstm_dim, return_sequences=True,
        #                activation='tanh'))
        model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
    model.add(Dropout(0.5))  # dropout before the dense layer
    # Pool output of all timesteps and perform classification using the
    # pooled output (instead of per-timestep classification).
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=output_dim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
# MASKED: generate_CNN_model function (lines 228-293)
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
                                    min_filters=10, max_filters=100,
                                    min_fc_nodes=10, max_fc_nodes=2000,
                                    low_lr=1, high_lr=4, low_reg=1,
                                    high_reg=4):
    """ Generate a hyperparameter set that defines a CNN model.

    Parameters
    ----------
    min_layers : int
        minimum of Conv layers
    max_layers : int
        maximum of Conv layers
    min_filters : int
        minimum number of filters per Conv layer
    max_filters : int
        maximum number of filters per Conv layer
    min_fc_nodes : int
        minimum number of hidden nodes per Dense layer
    max_fc_nodes : int
        maximum number of hidden nodes per Dense layer
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        parameters for a CNN model
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_layers = np.random.randint(min_layers, max_layers + 1)
    # Convert numpy types to plain Python types, consistent with the
    # DeepConvLSTM hyperparameter set and safe to serialize (e.g. to JSON).
    hyperparameters['filters'] = np.random.randint(
        min_filters, max_filters + 1, number_of_layers).tolist()
    hyperparameters['fc_hidden_nodes'] = int(np.random.randint(
        min_fc_nodes, max_fc_nodes + 1))
    return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
        min_conv_layers=1, max_conv_layers=10,
        min_conv_filters=10, max_conv_filters=100,
        min_lstm_layers=1, max_lstm_layers=5,
        min_lstm_dims=10, max_lstm_dims=100,
        low_lr=1, high_lr=4, low_reg=1, high_reg=4):
    """ Draw a random hyperparameter set that defines a DeepConvLSTM model.

    Parameters
    ----------
    min_conv_layers : int
        minimum number of Conv layers in DeepConvLSTM model
    max_conv_layers : int
        maximum number of Conv layers in DeepConvLSTM model
    min_conv_filters : int
        minimum number of filters per Conv layer in DeepConvLSTM model
    max_conv_filters : int
        maximum number of filters per Conv layer in DeepConvLSTM model
    min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
    min_lstm_dims : int
        minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
    max_lstm_dims : int
        maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        lower bound of the log range the learning rate is drawn from
    high_lr : float
        upper bound of the log range the learning rate is drawn from
    low_reg : float
        lower bound of the log range the regularization rate is drawn from
    high_reg : float
        upper bound of the log range the regularization rate is drawn from

    Returns
    -------
    params : dict
        hyperparameters for a DeepConvLSTM model
    """
    # Start from the hyperparameters every model type shares.
    params = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    # Draw the depth first, then one filter count per conv layer.
    n_conv = np.random.randint(min_conv_layers, max_conv_layers + 1)
    params['filters'] = np.random.randint(
        min_conv_filters, max_conv_filters + 1, n_conv).tolist()
    # Same for the recurrent part: depth, then one dim per LSTM layer.
    n_lstm = np.random.randint(min_lstm_layers, max_lstm_layers + 1)
    params['lstm_dims'] = np.random.randint(
        min_lstm_dims, max_lstm_dims + 1, n_lstm).tolist()
    return params
def generate_base_hyper_parameter_set(
        low_lr=1,
        high_lr=4,
        low_reg=1,
        high_reg=4):
    """ Draw the hyperparameters shared by every model type.

    Every model needs a learning rate and a regularization rate; this
    returns just those two, to be extended by the model-specific
    hyperparameter generators.

    Parameters
    ----------
    low_lr : float
        lower bound of the log range the learning rate is drawn from
    high_lr : float
        upper bound of the log range the learning rate is drawn from
    low_reg : float
        lower bound of the log range the regularization rate is drawn from
    high_reg : float
        upper bound of the log range the regularization rate is drawn from

    Returns
    -------
    hyperparameters : dict
        dict with keys 'learning_rate' and 'regularization_rate'
    """
    return {
        'learning_rate': get_learning_rate(low_lr, high_lr),
        'regularization_rate': get_regularization(low_reg, high_reg),
    }
def get_learning_rate(low=1, high=4):
    """ Return the learning rate.

    NOTE: random sampling (10^-n with n uniform in [low, high]) has been
    deliberately disabled here (see the inline comment); a fixed learning
    rate of 0.001 is returned and the bounds are ignored.

    Parameters
    ----------
    low : float
        low bound (currently unused)
    high : float
        high bound (currently unused)

    Returns
    -------
    learning_rate : float
        learning rate, fixed at 0.001
    """
    result = 0.001 # Fixed learning rate for Adam #10 ** (-np.random.uniform(low, high))
    return result
def get_regularization(low=1, high=4):
    """ Draw a random regularization rate.

    The rate is 10^-n, where the exponent n is sampled uniformly between
    the given bounds.

    Parameters
    ----------
    low : float
        lower bound for the exponent
    high : float
        upper bound for the exponent

    Returns
    -------
    regularization_rate : float
        randomly drawn regularization rate
    """
    exponent = np.random.uniform(low, high)
    return 10.0 ** (-exponent)
|
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
                       learning_rate=0.01, regularization_rate=0.01,
                       metrics=None):
    """
    Generate a convolutional neural network (CNN) model.

    The compiled Keras model is returned.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    fc_hidden_nodes : int
        number of hidden nodes for the hidden dense layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    if metrics is None:
        # Avoid a mutable default argument; create the default per call.
        metrics = ['accuracy']
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    outputdim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()
    model.add(
        BatchNormalization(
            input_shape=(
                dim_length,
                dim_channels)))
    for filter_number in filters:
        model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(units=fc_hidden_nodes,
                    kernel_regularizer=l2(regularization_rate),
                    kernel_initializer=weightinit))  # Fully connected layer
    model.add(Activation('relu'))  # Relu activation
    model.add(Dense(units=outputdim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
| 228
| 293
|
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM # Comment on HPC
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
        x_shape, number_of_classes, number_of_models=5, metrics=None,
        model_type=None,
        cnn_min_layers=5, cnn_max_layers=10,
        cnn_min_filters=25, cnn_max_filters=100,
        cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
        deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
        deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
        deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
        deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
        low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
    """
    Generate one or multiple untrained Keras models with random hyperparameters.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    number_of_classes : int
        Number of classes for classification task
    number_of_models : int
        Number of models to generate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.
    model_type : str, optional
        Type of model to build: 'CNN' or 'DeepConvLSTM'.
        Default option None generates both models.
    cnn_min_layers : int
        minimum of Conv layers in CNN model
    cnn_max_layers : int
        maximum of Conv layers in CNN model
    cnn_min_filters : int
        minimum number of filters per Conv layer in CNN model
    cnn_max_filters : int
        maximum number of filters per Conv layer in CNN model
    cnn_min_fc_nodes : int
        minimum number of hidden nodes per Dense layer in CNN model
    cnn_max_fc_nodes : int
        maximum number of hidden nodes per Dense layer in CNN model
    deepconvlstm_min_conv_layers : int
        minimum number of Conv layers in DeepConvLSTM model
    deepconvlstm_max_conv_layers : int
        maximum number of Conv layers in DeepConvLSTM model
    deepconvlstm_min_conv_filters : int
        minimum number of filters per Conv layer in DeepConvLSTM model
    deepconvlstm_max_conv_filters : int
        maximum number of filters per Conv layer in DeepConvLSTM model
    deepconvlstm_min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    deepconvlstm_max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
    deepconvlstm_min_lstm_dims : int
        minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
    deepconvlstm_max_lstm_dims : int
        maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)` (NOTE: sampling is
        currently disabled in `get_learning_rate`, which returns 0.001)
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    models : list
        List of (model, hyperparameters, model_type) tuples, where `model`
        is a compiled Keras model.

    Raises
    ------
    ValueError
        If `model_type` is not None, 'CNN' or 'DeepConvLSTM'.
    """
    if metrics is None:
        # Default created per call to avoid the mutable-default-argument
        # pitfall (a shared list would leak between calls).
        metrics = ['accuracy']
    models = []
    for _ in range(number_of_models):
        if model_type is None:  # no preference: pick a type at random
            current_model_type = 'CNN' if np.random.random() < 0.5 \
                else 'DeepConvLSTM'
        else:  # user-defined model choice
            current_model_type = model_type
        if current_model_type == 'CNN':
            generate_model = generate_CNN_model  # generate_model is a function
            hyperparameters = generate_CNN_hyperparameter_set(
                min_layers=cnn_min_layers, max_layers=cnn_max_layers,
                min_filters=cnn_min_filters, max_filters=cnn_max_filters,
                min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        elif current_model_type == 'DeepConvLSTM':
            generate_model = generate_DeepConvLSTM_model
            hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
                min_conv_layers=deepconvlstm_min_conv_layers,
                max_conv_layers=deepconvlstm_max_conv_layers,
                min_conv_filters=deepconvlstm_min_conv_filters,
                max_conv_filters=deepconvlstm_max_conv_filters,
                min_lstm_layers=deepconvlstm_min_lstm_layers,
                max_lstm_layers=deepconvlstm_max_lstm_layers,
                min_lstm_dims=deepconvlstm_min_lstm_dims,
                max_lstm_dims=deepconvlstm_max_lstm_dims,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        else:
            # Fail fast with a clear message instead of the NameError the
            # original code raised on an unrecognized model_type.
            raise ValueError(
                "Unknown model_type: %r (expected 'CNN' or 'DeepConvLSTM')"
                % current_model_type)
        models.append(
            (generate_model(x_shape, number_of_classes, metrics=metrics,
                            **hyperparameters),
             hyperparameters, current_model_type))
    return models
def generate_DeepConvLSTM_model(
        x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
        regularization_rate=0.01, metrics=None):
    """
    Generate a model with convolution and LSTM layers.
    See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    lstm_dims : list of ints
        number of hidden nodes for each LSTM layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    if metrics is None:
        # Avoid a mutable default argument; create the default per call.
        metrics = ['accuracy']
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    output_dim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()  # initialize model
    model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
    # reshape a 2 dimensional array per file/person/object into a
    # 3 dimensional array
    model.add(
        Reshape(target_shape=(dim_length, dim_channels, 1)))
    for filt in filters:
        # filt: number of filters used in a layer
        # filters: vector of filt values
        model.add(
            Convolution2D(filt, kernel_size=(3, 1), padding='same',
                          kernel_regularizer=l2(regularization_rate),
                          kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # reshape 3 dimensional array back into a 2 dimensional array,
    # but now with more depth as we have the filters for each channel
    model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
    for lstm_dim in lstm_dims:
        # CuDNNLSTM is the GPU/cuDNN fast path; swap in the plain LSTM
        # below when running on hardware without cuDNN (e.g. HPC CPU nodes):
        # model.add(LSTM(units=lstm_dim, return_sequences=True,
        #                activation='tanh'))
        model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
    model.add(Dropout(0.5))  # dropout before the dense layer
    # Pool output of all timesteps and perform classification using the
    # pooled output (instead of per-timestep classification).
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=output_dim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
                       learning_rate=0.01, regularization_rate=0.01,
                       metrics=None):
    """
    Generate a convolutional neural network (CNN) model.

    The compiled Keras model is returned.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    fc_hidden_nodes : int
        number of hidden nodes for the hidden dense layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    if metrics is None:
        # Avoid a mutable default argument; create the default per call.
        metrics = ['accuracy']
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    outputdim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()
    model.add(
        BatchNormalization(
            input_shape=(
                dim_length,
                dim_channels)))
    for filter_number in filters:
        model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(units=fc_hidden_nodes,
                    kernel_regularizer=l2(regularization_rate),
                    kernel_initializer=weightinit))  # Fully connected layer
    model.add(Activation('relu'))  # Relu activation
    model.add(Dense(units=outputdim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
                                    min_filters=10, max_filters=100,
                                    min_fc_nodes=10, max_fc_nodes=2000,
                                    low_lr=1, high_lr=4, low_reg=1,
                                    high_reg=4):
    """ Generate a hyperparameter set that defines a CNN model.

    Parameters
    ----------
    min_layers : int
        minimum of Conv layers
    max_layers : int
        maximum of Conv layers
    min_filters : int
        minimum number of filters per Conv layer
    max_filters : int
        maximum number of filters per Conv layer
    min_fc_nodes : int
        minimum number of hidden nodes per Dense layer
    max_fc_nodes : int
        maximum number of hidden nodes per Dense layer
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        parameters for a CNN model
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_layers = np.random.randint(min_layers, max_layers + 1)
    # Convert numpy types to plain Python types, consistent with the
    # DeepConvLSTM hyperparameter set and safe to serialize (e.g. to JSON).
    hyperparameters['filters'] = np.random.randint(
        min_filters, max_filters + 1, number_of_layers).tolist()
    hyperparameters['fc_hidden_nodes'] = int(np.random.randint(
        min_fc_nodes, max_fc_nodes + 1))
    return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
        min_conv_layers=1, max_conv_layers=10,
        min_conv_filters=10, max_conv_filters=100,
        min_lstm_layers=1, max_lstm_layers=5,
        min_lstm_dims=10, max_lstm_dims=100,
        low_lr=1, high_lr=4, low_reg=1, high_reg=4):
    """ Draw a random hyperparameter set that defines a DeepConvLSTM model.

    Parameters
    ----------
    min_conv_layers : int
        minimum number of Conv layers in DeepConvLSTM model
    max_conv_layers : int
        maximum number of Conv layers in DeepConvLSTM model
    min_conv_filters : int
        minimum number of filters per Conv layer in DeepConvLSTM model
    max_conv_filters : int
        maximum number of filters per Conv layer in DeepConvLSTM model
    min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
    min_lstm_dims : int
        minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
    max_lstm_dims : int
        maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        lower bound of the log range the learning rate is drawn from
    high_lr : float
        upper bound of the log range the learning rate is drawn from
    low_reg : float
        lower bound of the log range the regularization rate is drawn from
    high_reg : float
        upper bound of the log range the regularization rate is drawn from

    Returns
    -------
    params : dict
        hyperparameters for a DeepConvLSTM model
    """
    # Start from the hyperparameters every model type shares.
    params = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    # Draw the depth first, then one filter count per conv layer.
    n_conv = np.random.randint(min_conv_layers, max_conv_layers + 1)
    params['filters'] = np.random.randint(
        min_conv_filters, max_conv_filters + 1, n_conv).tolist()
    # Same for the recurrent part: depth, then one dim per LSTM layer.
    n_lstm = np.random.randint(min_lstm_layers, max_lstm_layers + 1)
    params['lstm_dims'] = np.random.randint(
        min_lstm_dims, max_lstm_dims + 1, n_lstm).tolist()
    return params
def generate_base_hyper_parameter_set(
        low_lr=1,
        high_lr=4,
        low_reg=1,
        high_reg=4):
    """ Draw the hyperparameters shared by every model type.

    Every model needs a learning rate and a regularization rate; this
    returns just those two, to be extended by the model-specific
    hyperparameter generators.

    Parameters
    ----------
    low_lr : float
        lower bound of the log range the learning rate is drawn from
    high_lr : float
        upper bound of the log range the learning rate is drawn from
    low_reg : float
        lower bound of the log range the regularization rate is drawn from
    high_reg : float
        upper bound of the log range the regularization rate is drawn from

    Returns
    -------
    hyperparameters : dict
        dict with keys 'learning_rate' and 'regularization_rate'
    """
    return {
        'learning_rate': get_learning_rate(low_lr, high_lr),
        'regularization_rate': get_regularization(low_reg, high_reg),
    }
def get_learning_rate(low=1, high=4):
    """ Return the learning rate.

    NOTE: random sampling (10^-n with n uniform in [low, high]) has been
    deliberately disabled here (see the inline comment); a fixed learning
    rate of 0.001 is returned and the bounds are ignored.

    Parameters
    ----------
    low : float
        low bound (currently unused)
    high : float
        high bound (currently unused)

    Returns
    -------
    learning_rate : float
        learning rate, fixed at 0.001
    """
    result = 0.001 # Fixed learning rate for Adam #10 ** (-np.random.uniform(low, high))
    return result
def get_regularization(low=1, high=4):
    """ Draw a random regularization rate.

    The rate is 10^-n, where the exponent n is sampled uniformly between
    the given bounds.

    Parameters
    ----------
    low : float
        lower bound for the exponent
    high : float
        upper bound for the exponent

    Returns
    -------
    regularization_rate : float
        randomly drawn regularization rate
    """
    exponent = np.random.uniform(low, high)
    return 10.0 ** (-exponent)
|
generate_DeepConvLSTM_hyperparameter_set
|
Generate a hyperparameter set that defines a DeepConvLSTM model.
Parameters
----------
min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
    min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters: dict
hyperparameters for a DeepConvLSTM model
|
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM # Comment on HPC
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
        x_shape, number_of_classes, number_of_models=5, metrics=None,
        model_type=None,
        cnn_min_layers=5, cnn_max_layers=10,
        cnn_min_filters=25, cnn_max_filters=100,
        cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
        deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
        deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
        deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
        deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
        low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
    """
    Generate one or multiple untrained Keras models with random hyperparameters.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    number_of_classes : int
        Number of classes for classification task
    number_of_models : int
        Number of models to generate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.
    model_type : str, optional
        Type of model to build: 'CNN' or 'DeepConvLSTM'.
        Default option None generates both models.
    cnn_min_layers, cnn_max_layers : int
        range for the number of Conv layers in a CNN model
    cnn_min_filters, cnn_max_filters : int
        range for the number of filters per Conv layer in a CNN model
    cnn_min_fc_nodes, cnn_max_fc_nodes : int
        range for the number of hidden nodes of the Dense layer in a CNN model
    deepconvlstm_min_conv_layers, deepconvlstm_max_conv_layers : int
        range for the number of Conv layers in a DeepConvLSTM model
    deepconvlstm_min_conv_filters, deepconvlstm_max_conv_filters : int
        range for the number of filters per Conv layer in a DeepConvLSTM model
    deepconvlstm_min_lstm_layers, deepconvlstm_max_lstm_layers : int
        range for the number of LSTM layers in a DeepConvLSTM model
    deepconvlstm_min_lstm_dims, deepconvlstm_max_lstm_dims : int
        range for the number of hidden nodes per LSTM layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    models : list
        List of (model, hyperparameters, model_type) tuples

    Raises
    ------
    ValueError
        If `model_type` is neither None, 'CNN' nor 'DeepConvLSTM'.
    """
    # Avoid the shared-mutable-default-argument pitfall for `metrics`.
    if metrics is None:
        metrics = ['accuracy']
    models = []
    for _ in range(number_of_models):
        if model_type is None:  # random model choice:
            current_model_type = 'CNN' if np.random.random() < 0.5 \
                else 'DeepConvLSTM'
        else:  # user-defined model choice:
            current_model_type = model_type
        if current_model_type == 'CNN':
            generate_model = generate_CNN_model  # generate_model is a function
            hyperparameters = generate_CNN_hyperparameter_set(
                min_layers=cnn_min_layers, max_layers=cnn_max_layers,
                min_filters=cnn_min_filters, max_filters=cnn_max_filters,
                min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        elif current_model_type == 'DeepConvLSTM':
            generate_model = generate_DeepConvLSTM_model
            hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
                min_conv_layers=deepconvlstm_min_conv_layers,
                max_conv_layers=deepconvlstm_max_conv_layers,
                min_conv_filters=deepconvlstm_min_conv_filters,
                max_conv_filters=deepconvlstm_max_conv_filters,
                min_lstm_layers=deepconvlstm_min_lstm_layers,
                max_lstm_layers=deepconvlstm_max_lstm_layers,
                min_lstm_dims=deepconvlstm_min_lstm_dims,
                max_lstm_dims=deepconvlstm_max_lstm_dims,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        else:
            # Previously an unknown string fell through to a confusing
            # NameError; fail fast with a clear message instead.
            raise ValueError(
                "model_type must be None, 'CNN' or 'DeepConvLSTM', "
                "got %r" % (model_type,))
        models.append(
            (generate_model(x_shape, number_of_classes, metrics=metrics,
                            **hyperparameters),
             hyperparameters, current_model_type))
    return models
def generate_DeepConvLSTM_model(
        x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
        regularization_rate=0.01, metrics=['accuracy']):
    """
    Generate a model with convolution and LSTM layers.
    See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    lstm_dims : list of ints
        number of hidden nodes for each LSTM layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    output_dim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()  # initialize model
    model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
    # reshape a 2 dimensional array per file/person/object into a
    # 3 dimensional array: a trailing depth axis of 1 lets Conv2D slide its
    # (3, 1) kernel along time independently for every channel
    model.add(
        Reshape(target_shape=(dim_length, dim_channels, 1)))
    for filt in filters:
        # filt: number of filters used in a layer
        # filters: vector of filt values
        model.add(
            Convolution2D(filt, kernel_size=(3, 1), padding='same',
                          kernel_regularizer=l2(regularization_rate),
                          kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # reshape 3 dimensional array back into a 2 dimensional array,
    # but now with more depth as we have the filters for each channel
    model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
    for lstm_dim in lstm_dims:
        # model.add(LSTM(units=lstm_dim, return_sequences=True,
        #                activation='tanh'))
        # NOTE(review): CuDNNLSTM requires a CUDA-capable GPU; swap in the
        # plain LSTM above when running on CPU-only (HPC) nodes.
        model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
    model.add(Dropout(0.5))  # dropout before the dense layer
    # Disabled alternative: per-timestep classification (TimeDistributed
    # Dense + softmax, then take the last timestep's prediction).
    # model.add(
    #     TimeDistributed(
    #         Dense(units=output_dim, kernel_regularizer=l2(regularization_rate))))
    # model.add(Activation("softmax"))
    # model.add(Lambda(lambda x: x[:, -1, :], output_shape=[output_dim]))
    # Pool output of all timesteps and perform classification using pooled output
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=output_dim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    # Softmax output, so categorical cross-entropy is used even for 2 classes.
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
                       learning_rate=0.01, regularization_rate=0.01,
                       metrics=None):
    """
    Generate a convolutional neural network (CNN) model.

    The compiled Keras model is returned.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    fc_hidden_nodes : int
        number of hidden nodes for the hidden dense layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    # Avoid the shared-mutable-default-argument pitfall for `metrics`.
    if metrics is None:
        metrics = ['accuracy']
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    outputdim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()
    model.add(
        BatchNormalization(input_shape=(dim_length, dim_channels)))
    for filter_number in filters:
        model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(units=fc_hidden_nodes,
                    kernel_regularizer=l2(regularization_rate),
                    kernel_initializer=weightinit))  # Fully connected layer
    model.add(Activation('relu'))  # Relu activation
    model.add(Dense(units=outputdim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    # Softmax output, so categorical cross-entropy is used even for 2 classes.
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
                                    min_filters=10, max_filters=100,
                                    min_fc_nodes=10, max_fc_nodes=2000,
                                    low_lr=1, high_lr=4, low_reg=1,
                                    high_reg=4):
    """ Generate a hyperparameter set that defines a CNN model.

    Parameters
    ----------
    min_layers : int
        minimum number of Conv layers
    max_layers : int
        maximum number of Conv layers
    min_filters : int
        minimum number of filters per Conv layer
    max_filters : int
        maximum number of filters per Conv layer
    min_fc_nodes : int
        minimum number of hidden nodes of the Dense layer
    max_fc_nodes : int
        maximum number of hidden nodes of the Dense layer
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate

    Returns
    -------
    hyperparameters : dict
        parameters for a CNN model ('filters', 'fc_hidden_nodes',
        plus the base learning/regularization rates)
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_layers = np.random.randint(min_layers, max_layers + 1)
    # Convert to plain Python types (.tolist()/int()) for consistency with
    # generate_DeepConvLSTM_hyperparameter_set and easy serialization.
    hyperparameters['filters'] = np.random.randint(
        min_filters, max_filters + 1, number_of_layers).tolist()
    hyperparameters['fc_hidden_nodes'] = int(np.random.randint(
        min_fc_nodes, max_fc_nodes + 1))
    return hyperparameters
# MASKED: generate_DeepConvLSTM_hyperparameter_set function (lines 345-399)
def generate_base_hyper_parameter_set(
        low_lr=1,
        high_lr=4,
        low_reg=1,
        high_reg=4):
    """Generate the hyperparameters shared by every model type.

    These are necessary for any model, but sufficient for none.

    Parameters
    ----------
    low_lr : float
        minimum of log range for learning rate
    high_lr : float
        maximum of log range for learning rate
    low_reg : float
        minimum of log range for regularization rate: the rate is sampled
        between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate

    Returns
    -------
    hyperparameters : dict
        base hyperparameters: 'learning_rate' and 'regularization_rate'
    """
    return {
        'learning_rate': get_learning_rate(low_lr, high_lr),
        'regularization_rate': get_regularization(low_reg, high_reg),
    }
def get_learning_rate(low=1, high=4):
    """Return the learning rate for the optimizer.

    NOTE: random sampling of ``10 ** -uniform(low, high)`` is currently
    disabled; a fixed Adam learning rate of 0.001 is returned and the
    bounds are ignored.

    Parameters
    ----------
    low : float
        low bound (unused while the rate is fixed)
    high : float
        high bound (unused while the rate is fixed)

    Returns
    -------
    learning_rate : float
        learning rate (always 0.001)
    """
    fixed_rate = 0.001  # was: 10 ** (-np.random.uniform(low, high))
    return fixed_rate
def get_regularization(low=1, high=4):
    """Return a random regularization rate ``10 ** -n`` where ``n`` is
    drawn uniformly between the low and high bounds.

    Parameters
    ----------
    low : float
        low bound of the exponent range
    high : float
        high bound of the exponent range

    Returns
    -------
    regularization_rate : float
        regularization rate
    """
    exponent = np.random.uniform(low, high)
    return 10.0 ** -exponent
|
def generate_DeepConvLSTM_hyperparameter_set(
        min_conv_layers=1, max_conv_layers=10,
        min_conv_filters=10, max_conv_filters=100,
        min_lstm_layers=1, max_lstm_layers=5,
        min_lstm_dims=10, max_lstm_dims=100,
        low_lr=1, high_lr=4, low_reg=1, high_reg=4):
    """ Generate a hyperparameter set that defines a DeepConvLSTM model.

    Parameters
    ----------
    min_conv_layers : int
        minimum number of Conv layers in DeepConvLSTM model
    max_conv_layers : int
        maximum number of Conv layers in DeepConvLSTM model
    min_conv_filters : int
        minimum number of filters per Conv layer in DeepConvLSTM model
    max_conv_filters : int
        maximum number of filters per Conv layer in DeepConvLSTM model
    min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
    min_lstm_dims : int
        minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
    max_lstm_dims : int
        maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate

    Returns
    -------
    hyperparameters : dict
        hyperparameters for a DeepConvLSTM model ('filters', 'lstm_dims',
        plus the base learning/regularization rates)
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_conv_layers = np.random.randint(
        min_conv_layers, max_conv_layers + 1)
    # .tolist(): keep plain Python ints in the hyperparameter dict
    hyperparameters['filters'] = np.random.randint(
        min_conv_filters, max_conv_filters + 1, number_of_conv_layers).tolist()
    number_of_lstm_layers = np.random.randint(
        min_lstm_layers, max_lstm_layers + 1)
    hyperparameters['lstm_dims'] = np.random.randint(
        min_lstm_dims, max_lstm_dims + 1, number_of_lstm_layers).tolist()
    return hyperparameters
| 345
| 399
|
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM # Comment on HPC
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
        x_shape, number_of_classes, number_of_models=5, metrics=None,
        model_type=None,
        cnn_min_layers=5, cnn_max_layers=10,
        cnn_min_filters=25, cnn_max_filters=100,
        cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
        deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
        deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
        deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
        deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
        low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
    """
    Generate one or multiple untrained Keras models with random hyperparameters.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    number_of_classes : int
        Number of classes for classification task
    number_of_models : int
        Number of models to generate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.
    model_type : str, optional
        Type of model to build: 'CNN' or 'DeepConvLSTM'.
        Default option None generates both models.
    cnn_min_layers, cnn_max_layers : int
        range for the number of Conv layers in a CNN model
    cnn_min_filters, cnn_max_filters : int
        range for the number of filters per Conv layer in a CNN model
    cnn_min_fc_nodes, cnn_max_fc_nodes : int
        range for the number of hidden nodes of the Dense layer in a CNN model
    deepconvlstm_min_conv_layers, deepconvlstm_max_conv_layers : int
        range for the number of Conv layers in a DeepConvLSTM model
    deepconvlstm_min_conv_filters, deepconvlstm_max_conv_filters : int
        range for the number of filters per Conv layer in a DeepConvLSTM model
    deepconvlstm_min_lstm_layers, deepconvlstm_max_lstm_layers : int
        range for the number of LSTM layers in a DeepConvLSTM model
    deepconvlstm_min_lstm_dims, deepconvlstm_max_lstm_dims : int
        range for the number of hidden nodes per LSTM layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    models : list
        List of (model, hyperparameters, model_type) tuples

    Raises
    ------
    ValueError
        If `model_type` is neither None, 'CNN' nor 'DeepConvLSTM'.
    """
    # Avoid the shared-mutable-default-argument pitfall for `metrics`.
    if metrics is None:
        metrics = ['accuracy']
    models = []
    for _ in range(number_of_models):
        if model_type is None:  # random model choice:
            current_model_type = 'CNN' if np.random.random() < 0.5 \
                else 'DeepConvLSTM'
        else:  # user-defined model choice:
            current_model_type = model_type
        if current_model_type == 'CNN':
            generate_model = generate_CNN_model  # generate_model is a function
            hyperparameters = generate_CNN_hyperparameter_set(
                min_layers=cnn_min_layers, max_layers=cnn_max_layers,
                min_filters=cnn_min_filters, max_filters=cnn_max_filters,
                min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        elif current_model_type == 'DeepConvLSTM':
            generate_model = generate_DeepConvLSTM_model
            hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
                min_conv_layers=deepconvlstm_min_conv_layers,
                max_conv_layers=deepconvlstm_max_conv_layers,
                min_conv_filters=deepconvlstm_min_conv_filters,
                max_conv_filters=deepconvlstm_max_conv_filters,
                min_lstm_layers=deepconvlstm_min_lstm_layers,
                max_lstm_layers=deepconvlstm_max_lstm_layers,
                min_lstm_dims=deepconvlstm_min_lstm_dims,
                max_lstm_dims=deepconvlstm_max_lstm_dims,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        else:
            # Previously an unknown string fell through to a confusing
            # NameError; fail fast with a clear message instead.
            raise ValueError(
                "model_type must be None, 'CNN' or 'DeepConvLSTM', "
                "got %r" % (model_type,))
        models.append(
            (generate_model(x_shape, number_of_classes, metrics=metrics,
                            **hyperparameters),
             hyperparameters, current_model_type))
    return models
def generate_DeepConvLSTM_model(
        x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
        regularization_rate=0.01, metrics=['accuracy']):
    """
    Generate a model with convolution and LSTM layers.
    See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    lstm_dims : list of ints
        number of hidden nodes for each LSTM layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    output_dim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()  # initialize model
    model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
    # reshape a 2 dimensional array per file/person/object into a
    # 3 dimensional array: a trailing depth axis of 1 lets Conv2D slide its
    # (3, 1) kernel along time independently for every channel
    model.add(
        Reshape(target_shape=(dim_length, dim_channels, 1)))
    for filt in filters:
        # filt: number of filters used in a layer
        # filters: vector of filt values
        model.add(
            Convolution2D(filt, kernel_size=(3, 1), padding='same',
                          kernel_regularizer=l2(regularization_rate),
                          kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # reshape 3 dimensional array back into a 2 dimensional array,
    # but now with more depth as we have the filters for each channel
    model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
    for lstm_dim in lstm_dims:
        # model.add(LSTM(units=lstm_dim, return_sequences=True,
        #                activation='tanh'))
        # NOTE(review): CuDNNLSTM requires a CUDA-capable GPU; swap in the
        # plain LSTM above when running on CPU-only (HPC) nodes.
        model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
    model.add(Dropout(0.5))  # dropout before the dense layer
    # Disabled alternative: per-timestep classification (TimeDistributed
    # Dense + softmax, then take the last timestep's prediction).
    # model.add(
    #     TimeDistributed(
    #         Dense(units=output_dim, kernel_regularizer=l2(regularization_rate))))
    # model.add(Activation("softmax"))
    # model.add(Lambda(lambda x: x[:, -1, :], output_shape=[output_dim]))
    # Pool output of all timesteps and perform classification using pooled output
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=output_dim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    # Softmax output, so categorical cross-entropy is used even for 2 classes.
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
                       learning_rate=0.01, regularization_rate=0.01,
                       metrics=None):
    """
    Generate a convolutional neural network (CNN) model.

    The compiled Keras model is returned.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    fc_hidden_nodes : int
        number of hidden nodes for the hidden dense layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list, optional
        Metrics to calculate on the validation set; defaults to ['accuracy'].
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    # Avoid the shared-mutable-default-argument pitfall for `metrics`.
    if metrics is None:
        metrics = ['accuracy']
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    outputdim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()
    model.add(
        BatchNormalization(input_shape=(dim_length, dim_channels)))
    for filter_number in filters:
        model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(units=fc_hidden_nodes,
                    kernel_regularizer=l2(regularization_rate),
                    kernel_initializer=weightinit))  # Fully connected layer
    model.add(Activation('relu'))  # Relu activation
    model.add(Dense(units=outputdim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    # Softmax output, so categorical cross-entropy is used even for 2 classes.
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
                                    min_filters=10, max_filters=100,
                                    min_fc_nodes=10, max_fc_nodes=2000,
                                    low_lr=1, high_lr=4, low_reg=1,
                                    high_reg=4):
    """ Generate a hyperparameter set that defines a CNN model.

    Parameters
    ----------
    min_layers : int
        minimum number of Conv layers
    max_layers : int
        maximum number of Conv layers
    min_filters : int
        minimum number of filters per Conv layer
    max_filters : int
        maximum number of filters per Conv layer
    min_fc_nodes : int
        minimum number of hidden nodes of the Dense layer
    max_fc_nodes : int
        maximum number of hidden nodes of the Dense layer
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate

    Returns
    -------
    hyperparameters : dict
        parameters for a CNN model ('filters', 'fc_hidden_nodes',
        plus the base learning/regularization rates)
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_layers = np.random.randint(min_layers, max_layers + 1)
    # Convert to plain Python types (.tolist()/int()) for consistency with
    # generate_DeepConvLSTM_hyperparameter_set and easy serialization.
    hyperparameters['filters'] = np.random.randint(
        min_filters, max_filters + 1, number_of_layers).tolist()
    hyperparameters['fc_hidden_nodes'] = int(np.random.randint(
        min_fc_nodes, max_fc_nodes + 1))
    return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
        min_conv_layers=1, max_conv_layers=10,
        min_conv_filters=10, max_conv_filters=100,
        min_lstm_layers=1, max_lstm_layers=5,
        min_lstm_dims=10, max_lstm_dims=100,
        low_lr=1, high_lr=4, low_reg=1, high_reg=4):
    """ Generate a hyperparameter set that defines a DeepConvLSTM model.

    Parameters
    ----------
    min_conv_layers : int
        minimum number of Conv layers in DeepConvLSTM model
    max_conv_layers : int
        maximum number of Conv layers in DeepConvLSTM model
    min_conv_filters : int
        minimum number of filters per Conv layer in DeepConvLSTM model
    max_conv_filters : int
        maximum number of filters per Conv layer in DeepConvLSTM model
    min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
    min_lstm_dims : int
        minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
    max_lstm_dims : int
        maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate

    Returns
    -------
    hyperparameters : dict
        hyperparameters for a DeepConvLSTM model ('filters', 'lstm_dims',
        plus the base learning/regularization rates)
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_conv_layers = np.random.randint(
        min_conv_layers, max_conv_layers + 1)
    # .tolist(): keep plain Python ints in the hyperparameter dict
    hyperparameters['filters'] = np.random.randint(
        min_conv_filters, max_conv_filters + 1, number_of_conv_layers).tolist()
    number_of_lstm_layers = np.random.randint(
        min_lstm_layers, max_lstm_layers + 1)
    hyperparameters['lstm_dims'] = np.random.randint(
        min_lstm_dims, max_lstm_dims + 1, number_of_lstm_layers).tolist()
    return hyperparameters
def generate_base_hyper_parameter_set(
        low_lr=1,
        high_lr=4,
        low_reg=1,
        high_reg=4):
    """Generate the hyperparameters shared by every model type.

    These are necessary for any model, but sufficient for none.

    Parameters
    ----------
    low_lr : float
        minimum of log range for learning rate
    high_lr : float
        maximum of log range for learning rate
    low_reg : float
        minimum of log range for regularization rate: the rate is sampled
        between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate

    Returns
    -------
    hyperparameters : dict
        base hyperparameters: 'learning_rate' and 'regularization_rate'
    """
    return {
        'learning_rate': get_learning_rate(low_lr, high_lr),
        'regularization_rate': get_regularization(low_reg, high_reg),
    }
def get_learning_rate(low=1, high=4):
    """Return the learning rate for the optimizer.

    NOTE: random sampling of ``10 ** -uniform(low, high)`` is currently
    disabled; a fixed Adam learning rate of 0.001 is returned and the
    bounds are ignored.

    Parameters
    ----------
    low : float
        low bound (unused while the rate is fixed)
    high : float
        high bound (unused while the rate is fixed)

    Returns
    -------
    learning_rate : float
        learning rate (always 0.001)
    """
    fixed_rate = 0.001  # was: 10 ** (-np.random.uniform(low, high))
    return fixed_rate
def get_regularization(low=1, high=4):
    """Return a random regularization rate ``10 ** -n`` where ``n`` is
    drawn uniformly between the low and high bounds.

    Parameters
    ----------
    low : float
        low bound of the exponent range
    high : float
        high bound of the exponent range

    Returns
    -------
    regularization_rate : float
        regularization rate
    """
    exponent = np.random.uniform(low, high)
    return 10.0 ** -exponent
|
presents_for_house
|
https://math.stackexchange.com/a/22744
>>> presents_for_house(1)
10
>>> presents_for_house(2)
30
>>> presents_for_house(3)
40
>>> presents_for_house(8)
150
>>> presents_for_house(9)
130
|
# -*- coding: utf-8 -*-
from functools import cache
INPUT = 33100000
def sigma_pentagonal_numbers(limit):
    """Yield generalized pentagonal numbers p(n) = n(3n - 1)/2 for
    n = 1, -1, 2, -2, ... while p(n) <= limit.

    >>> list(sigma_pentagonal_numbers(16))
    [1, 2, 5, 7, 12, 15]
    """
    k = 1
    while True:
        # visit the positive and negative index of the same magnitude;
        # the resulting p values are strictly increasing
        for n in (k, -k):
            p = n * (3 * n - 1) // 2
            if p > limit:
                return
            yield p
        k += 1
def sigma_sign_generator():
    """Yield the repeating sign pattern +1, +1, -1, -1 forever,
    as used by the pentagonal-number recurrence for sigma."""
    pattern = (1, 1, -1, -1)
    i = 0
    while True:
        yield pattern[i]
        i = (i + 1) % 4
# MASKED: presents_for_house function (lines 37-68)
def part1(data):
    """Return the first house number that receives at least *data* presents.

    # Takes too long so commented out
    # >>> part1(INPUT)
    # 776160
    """
    house = 0
    presents = 0
    max_presents = 0  # running maximum; renamed to avoid shadowing builtin max
    while presents < data:
        house += 1
        presents = presents_for_house(house)
        if presents > max_presents:
            max_presents = presents
    print(max_presents)  # progress/debug output, kept from the original
    return house
def part2(data):
    """Return the first house receiving at least *data* presents when each
    elf delivers 11x its number to only its first 50 houses.

    >>> part2(INPUT)
    786240
    """
    # Bug fix: bound the search by the `data` argument instead of the
    # module-level INPUT constant, so part2 works for any target value.
    upper_limit = data
    house = [0] * (upper_limit + 1)
    elf = 1
    while elf <= upper_limit:
        elf_end = min(elf * 50, upper_limit)
        for number in range(elf, elf_end + 1, elf):
            index = number - 1
            house[index] += 11 * elf
            if house[index] >= data:
                # no later house can be the *first* winner, so shrink the
                # search space for the remaining elves
                upper_limit = min(number, upper_limit)
        elf += 1
    for i, value in enumerate(house):
        if value >= data:
            return i + 1
    raise ValueError()
def main():
    # Entry point: solve both puzzle parts for the fixed input.
    # NOTE(review): part1 is slow for the full input (see its docstring).
    print(part1(INPUT))
    print(part2(INPUT))
if __name__ == "__main__":
main()
|
@cache
def presents_for_house(house):
    """Return the presents delivered to *house*, i.e. 10 * sigma(house),
    computed with the pentagonal-number recurrence for the sum-of-divisors
    function.

    https://math.stackexchange.com/a/22744

    >>> presents_for_house(1)
    10
    >>> presents_for_house(2)
    30
    >>> presents_for_house(3)
    40
    >>> presents_for_house(8)
    150
    >>> presents_for_house(9)
    130
    """
    # base case: sigma(1) = 1, so house 1 receives 10 presents
    if house == 1:
        return 10
    presents = 0
    # signs follow the +, +, -, - pattern of the recurrence; the generator
    # must advance in lockstep with the pentagonal numbers below
    sign = sigma_sign_generator()
    for p in sigma_pentagonal_numbers(house):
        n = house - p
        if n == 0:
            # boundary term of the recurrence contributes +/- house itself
            presents += house * next(sign) * 10
        else:
            presents += presents_for_house(n) * next(sign)
    return presents
| 37
| 68
|
# -*- coding: utf-8 -*-
from functools import cache
INPUT = 33100000
def sigma_pentagonal_numbers(limit):
    """Yield the generalized pentagonal numbers g(k) = k(3k-1)/2 for
    k = 1, -1, 2, -2, ... in increasing order, stopping once they
    would exceed `limit`.

    >>> list(sigma_pentagonal_numbers(16))
    [1, 2, 5, 7, 12, 15]
    """
    k = 1
    while True:
        # Each positive k contributes two generalized pentagonal numbers,
        # one for +k and one for -k, already in ascending order.
        for m in (k, -k):
            g = m * (3 * m - 1) // 2
            if g > limit:
                return
            yield g
        k += 1
def sigma_sign_generator():
    """Yield the repeating sign pattern 1, 1, -1, -1 used by the
    pentagonal-number recurrence for the divisor-sum function."""
    pattern = (1, 1, -1, -1)
    idx = 0
    while True:
        yield pattern[idx]
        idx = (idx + 1) % 4
@cache
def presents_for_house(house):
    """
    https://math.stackexchange.com/a/22744
    >>> presents_for_house(1)
    10
    >>> presents_for_house(2)
    30
    >>> presents_for_house(3)
    40
    >>> presents_for_house(8)
    150
    >>> presents_for_house(9)
    130
    """
    # Presents for a house equal 10 * sigma(house), where sigma is the
    # sum-of-divisors function, evaluated via Euler's pentagonal-number
    # recurrence (see link above):
    #   sigma(n) = sigma(n-1) + sigma(n-2) - sigma(n-5) - sigma(n-7) + ...
    # with signs cycling +, +, -, - and the sigma(0) term replaced by n.
    # @cache memoizes the recursion so each house is computed once.
    if house == 1:
        return 10
    presents = 0
    sign = sigma_sign_generator()
    for p in sigma_pentagonal_numbers(house):
        n = house - p
        if n == 0:
            # The recurrence's sigma(0) term is replaced by n (scaled by 10).
            presents += house * next(sign) * 10
        else:
            presents += presents_for_house(n) * next(sign)
    return presents
def part1(data):
    """
    # Takes too long so commented out
    # >>> part1(INPUT)
    # 776160
    """
    # Walk the houses in order until the present count first reaches `data`;
    # also track (and print) the highest present count seen along the way.
    house = 0
    presents = 0
    highest = 0  # renamed from `max`, which shadowed the builtin
    while presents < data:
        house += 1
        presents = presents_for_house(house)
        if presents > highest:
            highest = presents
    print(highest)
    return house
def part2(data):
    """
    >>> part2(INPUT)
    786240
    """
    # Each elf `e` delivers 11 * e presents to its first 50 multiples.
    # Fix: bound the search by the `data` argument instead of the module-level
    # INPUT constant, so the function honours its parameter (the original
    # allocated a 33M-entry list even for tiny inputs).  The answer is
    # unchanged because the first qualifying house is always <= data.
    upper_limit = data
    house = [0] * (upper_limit + 1)
    elf = 1
    while elf <= upper_limit:
        elf_end = min(elf * 50, upper_limit)
        for number in range(elf, elf_end + 1, elf):
            index = number - 1
            house[index] += 11 * elf
            if house[index] >= data:
                # No house past the first qualifying one can be the answer.
                upper_limit = min(number, upper_limit)
        elf += 1
    for i, value in enumerate(house):
        if value >= data:
            return i + 1
    raise ValueError()
def main():
    # Solve and print both puzzle parts for the fixed puzzle input.
    print(part1(INPUT))
    print(part2(INPUT))
if __name__ == "__main__":
    main()
|
get_git_changeset
|
Return a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
|
import datetime
import functools
import os
import subprocess
def get_version(version=None):
    """Return a PEP 440-compliant version number from VERSION.

    The result is the main version (X.Y[.Z]) plus either '.devN' for
    pre-alpha builds, '{a|b|rc}N' for pre-releases, or nothing for finals.
    """
    version = get_complete_version(version)
    main = get_main_version(version)
    release_level, serial = version[3], version[4]
    if release_level == 'alpha' and serial == 0:
        # Pre-alpha: derive a .devN suffix from the latest git commit, if any.
        changeset = get_git_changeset()
        suffix = '.dev%s' % changeset if changeset else ''
    elif release_level == 'final':
        suffix = ''
    else:
        suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}[release_level] + str(serial)
    return main + suffix
def get_main_version(version=None):
    """Return main version (X.Y[.Z]) from VERSION; Z is omitted when zero."""
    version = get_complete_version(version)
    count = 3 if version[2] else 2
    return '.'.join(str(part) for part in version[:count])
def get_complete_version(version=None):
    """
    Return a tuple of the django version. If version argument is non-empty,
    check for correctness of the tuple provided.
    """
    if version is not None:
        # Validate the caller-supplied tuple: (major, minor, micro, level, serial).
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
        return version
    from django import VERSION
    return VERSION
def get_docs_version(version=None):
    """Return the docs version string: 'X.Y' for finals, 'dev' otherwise."""
    version = get_complete_version(version)
    if version[3] == 'final':
        return '%d.%d' % version[:2]
    return 'dev'
# MASKED: get_git_changeset function (lines 60-79)
|
@functools.lru_cache()
def get_git_changeset():
    """Return a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    Returns None when the timestamp cannot be determined (no git, not a repo).
    """
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    try:
        # List-args invocation avoids the shell (safer than shell=True and
        # equivalent here); OSError covers git being absent entirely.
        git_log = subprocess.run(
            ['git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'],
            capture_output=True, cwd=repo_dir, text=True,
        )
    except OSError:
        return None
    try:
        timestamp = int(git_log.stdout)
    except ValueError:
        # Empty/garbage output (e.g. not a git checkout).
        return None
    # Timezone-aware replacement for the deprecated utcfromtimestamp();
    # strftime output is identical.
    changeset = datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
    return changeset.strftime('%Y%m%d%H%M%S')
| 60
| 79
|
import datetime
import functools
import os
import subprocess
def get_version(version=None):
    """Return a PEP 440-compliant version number from VERSION.

    The result is the main version (X.Y[.Z]) plus either '.devN' for
    pre-alpha builds, '{a|b|rc}N' for pre-releases, or nothing for finals.
    """
    version = get_complete_version(version)
    main = get_main_version(version)
    release_level, serial = version[3], version[4]
    if release_level == 'alpha' and serial == 0:
        # Pre-alpha: derive a .devN suffix from the latest git commit, if any.
        changeset = get_git_changeset()
        suffix = '.dev%s' % changeset if changeset else ''
    elif release_level == 'final':
        suffix = ''
    else:
        suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}[release_level] + str(serial)
    return main + suffix
def get_main_version(version=None):
    """Return main version (X.Y[.Z]) from VERSION; Z is omitted when zero."""
    version = get_complete_version(version)
    count = 3 if version[2] else 2
    return '.'.join(str(part) for part in version[:count])
def get_complete_version(version=None):
    """
    Return a tuple of the django version. If version argument is non-empty,
    check for correctness of the tuple provided.
    """
    if version is not None:
        # Validate the caller-supplied tuple: (major, minor, micro, level, serial).
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
        return version
    from django import VERSION
    return VERSION
def get_docs_version(version=None):
    """Return the docs version string: 'X.Y' for finals, 'dev' otherwise."""
    version = get_complete_version(version)
    if version[3] == 'final':
        return '%d.%d' % version[:2]
    return 'dev'
@functools.lru_cache()
def get_git_changeset():
    """Return a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    Returns None when the timestamp cannot be determined (no git, not a repo).
    """
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    try:
        # List-args invocation avoids the shell (safer than shell=True and
        # equivalent here); OSError covers git being absent entirely.
        git_log = subprocess.run(
            ['git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'],
            capture_output=True, cwd=repo_dir, text=True,
        )
    except OSError:
        return None
    try:
        timestamp = int(git_log.stdout)
    except ValueError:
        # Empty/garbage output (e.g. not a git checkout).
        return None
    # Timezone-aware replacement for the deprecated utcfromtimestamp();
    # strftime output is identical.
    changeset = datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
    return changeset.strftime('%Y%m%d%H%M%S')
|
write_fst_no_silence
|
Writes the text format of L.fst to the standard output. This version is for
when --sil-prob=0.0, meaning there is no optional silence allowed.
'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
read_lexiconp().
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding, and must be
supplied if 'nonterminals' is supplied is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
|
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# see get_args() below for usage message.
import argparse
import os
import sys
import math
import re
# The use of latin-1 encoding does not preclude reading utf-8. latin-1
# encoding means "treat words as sequences of bytes", and it is compatible
# with utf-8 encoding as well as other encodings such as gbk, as long as the
# spaces are also spaces in ascii (which we check). It is basically how we
# emulate the behavior of python before python3.
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
    """Parse and return the command-line arguments (see --help for details)."""
    parser = argparse.ArgumentParser(description="""This script creates the
       text form of a lexicon FST, to be compiled by fstcompile using the
       appropriate symbol tables (phones.txt and words.txt) . It will mostly
       be invoked indirectly via utils/prepare_lang.sh. The output goes to
       the stdout.""")
    parser.add_argument('--sil-phone', dest='sil_phone', type=str,
                        help="""Text form of optional-silence phone, e.g. 'SIL'. See also
                        the --silprob option.""")
    parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
                        help="""Probability of silence between words (including at the
                        beginning and end of word sequences). Must be in the range [0.0, 1.0].
                        This refers to the optional silence inserted by the lexicon; see
                        the --silphone option.""")
    parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
                        help="""Disambiguation symbol to disambiguate silence, e.g. #5.
                        Will only be supplied if you are creating the version of L.fst
                        with disambiguation symbols, intended for use with cyclic G.fst.
                        This symbol was introduced to fix a rather obscure source of
                        nondeterminism of CLG.fst, that has to do with reordering of
                        disambiguation symbols and phone symbols.""")
    parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
                        help="""Only relevant if --nonterminals is also supplied; this relates
                        to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
                        src/doc/grammar.dox). Format is a list of left-context phones,
                        in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
    parser.add_argument('--nonterminals', type=str,
                        help="""If supplied, --left-context-phones must also be supplied.
                        List of user-defined nonterminal symbols such as #nonterm:contact_list,
                        one per line. E.g. data/local/dict/nonterminals.txt.""")
    parser.add_argument('lexiconp', type=str,
                        help="""Filename of lexicon with pronunciation probabilities
                        (normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
                        e.g. 'a 1.0 ay'""")
    args = parser.parse_args()
    return args
def read_lexiconp(filename):
    """Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
    Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
    'pron_prob', a float, is the pronunciation probability (which must be >0.0
    and would normally be <=1.0), and 'pron' is a list of strings representing phones.
    An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).

    Exits the program (status 1) on malformed input.
    """
    ans = []
    found_empty_prons = False
    found_large_pronprobs = False
    # See the comment near the top of this file, RE why we use latin-1.
    with open(filename, 'r', encoding='latin-1') as f:
        whitespace = re.compile("[ \t]+")
        for line in f:
            a = whitespace.split(line.strip(" \t\r\n"))
            if len(a) < 2:
                print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
                    sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
                sys.exit(1)
            word = a[0]
            if word == "<eps>":
                # This would clash with the epsilon symbol normally used in OpenFst.
                # Bug fix: the original omitted sys.argv[0] from the format args,
                # so the offending line was printed where the program name belongs
                # and the message was truncated.
                print("{0}: error: found <eps> as a word in line '{1}' of lexicon file "
                      "{2}".format(sys.argv[0], line.strip(" \t\r\n"), filename),
                      file=sys.stderr)
                sys.exit(1)
            try:
                pron_prob = float(a[1])
            except ValueError:  # narrowed from a bare `except:`
                print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
                      "should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
                      file=sys.stderr)
                sys.exit(1)
            prons = a[2:]
            if pron_prob <= 0.0:
                # Bug fix: '{1}' was used twice in the original format string, so
                # the filename was never printed; it now gets its own slot.
                print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {2} ".format(
                    sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
                sys.exit(1)
            if len(prons) == 0:
                found_empty_prons = True
            ans.append( (word, pron_prob, prons) )
            if pron_prob > 1.0:
                found_large_pronprobs = True
    if found_empty_prons:
        print("{0}: warning: found at least one word with an empty pronunciation "
              "in lexicon file {1}.".format(sys.argv[0], filename),
              file=sys.stderr)
    if found_large_pronprobs:
        print("{0}: warning: found at least one word with pron-prob >1.0 "
              "in {1}".format(sys.argv[0], filename), file=sys.stderr)
    if len(ans) == 0:
        print("{0}: error: found no pronunciations in lexicon file {1}".format(
            sys.argv[0], filename), file=sys.stderr)
        sys.exit(1)
    return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
                           nonterminals, left_context_phones):
    """Write to stdout the extra L.fst arcs needed for grammar decoding
    (see kaldi-asr.org/doc/grammar.html, section "Special symbols in L.fst",
    kaldi-asr.org/doc/grammar.html#grammar_special_l).  Called from
    write_fst_no_silence and write_fst_with_silence.

    start_state: the start-state of L.fst.
    loop_state: the state of high out-degree in L.fst where words leave
       and enter.
    next_state: the number from which this function can start allocating its
       own states.  The updated value of next_state is returned.
    nonterminals: the user-defined nonterminal symbols as a list of
       strings, e.g. ['#nonterm:contact_list', ... ].
    left_context_phones: a list of phones that may appear as left-context,
       e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    def _print_arc(src, dest, phone, word, cost):
        # One FST arc in OpenFst text format: src dest ilabel olabel weight.
        print("{0}\t{1}\t{2}\t{3}\t{4}".format(src, dest, phone, word, cost))

    shared_state = next_state
    final_state = next_state + 1
    next_state += 2
    # Entering a sub-grammar from the very start of the utterance.
    _print_arc(start_state, shared_state, '#nonterm_begin', '#nonterm_begin', 0.0)
    # Entering any user-defined nonterminal from the word-loop state.
    for nonterminal in nonterminals:
        _print_arc(loop_state, shared_state, nonterminal, nonterminal, 0.0)
    # this_cost equals log(len(left_context_phones)); charging it here keeps
    # the FST stochastic (sum-to-one, like an HMM) so weight pushing stays
    # well-behaved.  The grammar-FST code cancels it out when splicing
    # (see CombineArcs()).
    this_cost = -math.log(1.0 / len(left_context_phones))
    for left_context_phone in left_context_phones:
        _print_arc(shared_state, loop_state, left_context_phone, '<eps>', this_cost)
    # Arc from the loop state to a final state, labeled #nonterm_end.
    _print_arc(loop_state, final_state, '#nonterm_end', '#nonterm_end', 0.0)
    print("{0}\t{1}".format(final_state, 0.0))
    return next_state
# MASKED: write_fst_no_silence function (lines 173-217)
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
                           nonterminals=None, left_context_phones=None):
    """Writes the text format of L.fst to the standard output. This version is for
    when --sil-prob != 0.0, meaning there is optional silence
    'lexicon' is a list of 3-tuples (word, pron-prob, prons)
        as returned by read_lexiconp().
    'sil_prob', which is expected to be strictly between 0.. and 1.0, is the
        probability of silence
    'sil_phone' is the silence phone, e.g. "SIL".
    'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
    'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
        is either None, or the user-defined nonterminal symbols as a list of
        strings, e.g. ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding, and must be
        supplied if 'nonterminals' is supplied is either None or a list of
        phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    assert sil_prob > 0.0 and sil_prob < 1.0
    sil_cost = -math.log(sil_prob)
    no_sil_cost = -math.log(1.0 - sil_prob);
    start_state = 0
    loop_state = 1          # words enter and leave from here
    sil_state = 2           # words terminate here when followed by silence; this state
                            # has a silence transition to loop_state.
    next_state = 3          # the next un-allocated state, will be incremented as we go.
    # Two arcs from the start state model optional utterance-initial silence.
    print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
        src=start_state, dest=loop_state,
        phone='<eps>', word='<eps>', cost=no_sil_cost))
    print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
        src=start_state, dest=sil_state,
        phone='<eps>', word='<eps>', cost=sil_cost))
    if sil_disambig is None:
        print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
            src=sil_state, dest=loop_state,
            phone=sil_phone, word='<eps>', cost=0.0))
    else:
        # With a disambiguation symbol, silence goes through an extra state
        # that emits the disambiguation label before rejoining the loop.
        sil_disambig_state = next_state
        next_state += 1
        print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
            src=sil_state, dest=sil_disambig_state,
            phone=sil_phone, word='<eps>', cost=0.0))
        print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
            src=sil_disambig_state, dest=loop_state,
            phone=sil_disambig, word='<eps>', cost=0.0))
    for (word, pronprob, pron) in lexicon:
        pron_cost = -math.log(pronprob)
        cur_state = loop_state
        # All but the last phone: word label and pron cost go on the first arc.
        for i in range(len(pron) - 1):
            print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
                src=cur_state, dest=next_state,
                phone=pron[i],
                word=(word if i == 0 else '<eps>'),
                cost=(pron_cost if i == 0 else 0.0)))
            cur_state = next_state
            next_state += 1
        i = len(pron) - 1 # note: i == -1 if pron is empty.
        # Last phone: one arc back to the loop state (no silence follows) ...
        print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
            src=cur_state,
            dest=loop_state,
            phone=(pron[i] if i >= 0 else '<eps>'),
            word=(word if i <= 0 else '<eps>'),
            cost=no_sil_cost + (pron_cost if i <= 0 else 0.0)))
        # ... and one into the silence state (optional silence follows).
        print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
            src=cur_state,
            dest=sil_state,
            phone=(pron[i] if i >= 0 else '<eps>'),
            word=(word if i <= 0 else '<eps>'),
            cost=sil_cost + (pron_cost if i <= 0 else 0.0)))
    if nonterminals is not None:
        next_state = write_nonterminal_arcs(
            start_state, loop_state, next_state,
            nonterminals, left_context_phones)
    print("{state}\t{final_cost}".format(
        state=loop_state,
        final_cost=0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
    """Writes updated words.txt to 'filename'. 'orig_lines' is the original lines
    in the words.txt file as a list of strings (without the newlines);
    highest_numbered_symbol is the highest numbered symbol in the original
    words.txt; nonterminals is a list of strings like '#nonterm:foo'."""
    extra_symbols = ['#nonterm_begin', '#nonterm_end'] + nonterminals
    with open(filename, 'w', encoding='latin-1') as f:
        for line in orig_lines:
            print(line, file=f)
        # New symbols are numbered consecutively after the highest existing one.
        for offset, symbol in enumerate(extra_symbols):
            print("{0} {1}".format(symbol, highest_numbered_symbol + 1 + offset), file=f)
def read_nonterminals(filename):
    """Reads the user-defined nonterminal symbols in 'filename', checks that
    it has the expected format and has no duplicates, and returns the nonterminal
    symbols as a list of strings, e.g.
    ['#nonterm:contact_list', '#nonterm:phone_number', ... ].

    Raises RuntimeError on an empty file, a malformed symbol, or duplicates.
    """
    # Fix: use a context manager so the file handle is closed promptly
    # (the original left the open() result to the garbage collector).
    with open(filename, 'r', encoding='latin-1') as f:
        ans = [line.strip(" \t\r\n") for line in f]
    if len(ans) == 0:
        raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
    for nonterm in ans:
        if nonterm[:9] != '#nonterm:':
            raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
                               .format(filename, nonterm))
    if len(set(ans)) != len(ans):
        raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
    return ans
def read_left_context_phones(filename):
    """Reads, checks, and returns a list of left-context phones, in text form, one
    per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]

    Raises RuntimeError on an empty file, a line containing whitespace,
    or duplicate phones.
    """
    # Fix: use a context manager so the file handle is closed promptly
    # (the original left the open() result to the garbage collector).
    with open(filename, 'r', encoding='latin-1') as f:
        ans = [line.strip(" \t\r\n") for line in f]
    if len(ans) == 0:
        raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
    whitespace = re.compile("[ \t]+")
    for s in ans:
        if len(whitespace.split(s)) != 1:
            raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s) )
    if len(set(ans)) != len(ans):
        # Fix: the original message said "nonterminal symbols" (copy-pasted
        # from read_nonterminals); this check is about duplicate phones.
        raise RuntimeError("Duplicate left-context phones are present in file {0}".format(filename))
    return ans
def is_token(s):
    """Return True if `s` is a string containing no whitespace."""
    if not isinstance(s, str):
        return False
    parts = re.compile("[ \t\r\n]+").split(s)
    # A whitespace-free string splits into exactly one piece: itself.
    return len(parts) == 1 and parts[0] == s
def main():
    """Read the lexicon (and optional grammar files) and write L.fst text to stdout."""
    args = get_args()
    lexicon = read_lexiconp(args.lexiconp)
    if args.nonterminals is None:
        nonterminals, left_context_phones = None, None
    else:
        # Grammar decoding: both option files are required together.
        if args.left_context_phones is None:
            print("{0}: if --nonterminals is specified, --left-context-phones must also "
                  "be specified".format(sys.argv[0]))
            sys.exit(1)
        nonterminals = read_nonterminals(args.nonterminals)
        left_context_phones = read_left_context_phones(args.left_context_phones)
    if args.sil_prob == 0.0:
        write_fst_no_silence(lexicon,
                             nonterminals=nonterminals,
                             left_context_phones=left_context_phones)
    else:
        # Do some checking that the options make sense.
        if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
            print("{0}: invalid value specified --sil-prob={1}".format(
                sys.argv[0], args.sil_prob), file=sys.stderr)
            sys.exit(1)
        if not is_token(args.sil_phone):
            print("{0}: you specified --sil-prob={1} but --sil-phone is set "
                  "to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
                  file=sys.stderr)
            sys.exit(1)
        if args.sil_disambig is not None and not is_token(args.sil_disambig):
            print("{0}: invalid value --sil-disambig='{1}' was specified."
                  "".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
            sys.exit(1)
        write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
                               args.sil_disambig,
                               nonterminals=nonterminals,
                               left_context_phones=left_context_phones)
    # (lines, highest_symbol) = read_words_txt(args.input_words_txt)
    # nonterminals = read_nonterminals(args.nonterminal_symbols_list)
    # write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt)
if __name__ == '__main__':
    main()
|
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
    """Writes the text format of L.fst to the standard output. This version is for
    when --sil-prob=0.0, meaning there is no optional silence allowed.

    'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
       read_lexiconp().
    'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
       is either None, or the user-defined nonterminal symbols as a list of
       strings, e.g. ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding, and must be
       supplied if 'nonterminals' is supplied is either None or a list of
       phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    loop_state = 0   # words enter and leave from here; with no optional
                     # silence this is also the FST's start state.
    next_state = 1   # the next un-allocated state, will be incremented as we go.
    for (word, pronprob, pron) in lexicon:
        cost = -math.log(pronprob)
        cur_state = loop_state
        # All but the last phone: the word label and pron cost go on the
        # first arc, epsilon/zero on the rest.
        for i in range(len(pron) - 1):
            print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
                src=cur_state,
                dest=next_state,
                phone=pron[i],
                word=(word if i == 0 else '<eps>'),
                cost=(cost if i == 0 else 0.0)))
            cur_state = next_state
            next_state += 1
        i = len(pron) - 1 # note: i == -1 if pron is empty.
        print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
            src=cur_state,
            dest=loop_state,
            phone=(pron[i] if i >= 0 else '<eps>'),
            word=(word if i <= 0 else '<eps>'),
            cost=(cost if i <= 0 else 0.0)))
    if nonterminals is not None:
        # Bug fix: the original passed an undefined name `start_state` here,
        # raising NameError whenever nonterminals were supplied.  In this
        # no-silence version the start state *is* the loop state, so pass
        # loop_state for both.
        next_state = write_nonterminal_arcs(
            loop_state, loop_state, next_state,
            nonterminals, left_context_phones)
    print("{state}\t{final_cost}".format(
        state=loop_state,
        final_cost=0.0))
| 173
| 217
|
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# see get_args() below for usage message.
import argparse
import os
import sys
import math
import re
# The use of latin-1 encoding does not preclude reading utf-8. latin-1
# encoding means "treat words as sequences of bytes", and it is compatible
# with utf-8 encoding as well as other encodings such as gbk, as long as the
# spaces are also spaces in ascii (which we check). It is basically how we
# emulate the behavior of python before python3.
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
    """Parse and return the command-line arguments (see --help for details)."""
    parser = argparse.ArgumentParser(description="""This script creates the
       text form of a lexicon FST, to be compiled by fstcompile using the
       appropriate symbol tables (phones.txt and words.txt) . It will mostly
       be invoked indirectly via utils/prepare_lang.sh. The output goes to
       the stdout.""")
    parser.add_argument('--sil-phone', dest='sil_phone', type=str,
                        help="""Text form of optional-silence phone, e.g. 'SIL'. See also
                        the --silprob option.""")
    parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
                        help="""Probability of silence between words (including at the
                        beginning and end of word sequences). Must be in the range [0.0, 1.0].
                        This refers to the optional silence inserted by the lexicon; see
                        the --silphone option.""")
    parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
                        help="""Disambiguation symbol to disambiguate silence, e.g. #5.
                        Will only be supplied if you are creating the version of L.fst
                        with disambiguation symbols, intended for use with cyclic G.fst.
                        This symbol was introduced to fix a rather obscure source of
                        nondeterminism of CLG.fst, that has to do with reordering of
                        disambiguation symbols and phone symbols.""")
    parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
                        help="""Only relevant if --nonterminals is also supplied; this relates
                        to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
                        src/doc/grammar.dox). Format is a list of left-context phones,
                        in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
    parser.add_argument('--nonterminals', type=str,
                        help="""If supplied, --left-context-phones must also be supplied.
                        List of user-defined nonterminal symbols such as #nonterm:contact_list,
                        one per line. E.g. data/local/dict/nonterminals.txt.""")
    parser.add_argument('lexiconp', type=str,
                        help="""Filename of lexicon with pronunciation probabilities
                        (normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
                        e.g. 'a 1.0 ay'""")
    args = parser.parse_args()
    return args
def read_lexiconp(filename):
    """Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
    Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
    'pron_prob', a float, is the pronunciation probability (which must be >0.0
    and would normally be <=1.0), and 'pron' is a list of strings representing phones.
    An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).

    Exits the program (status 1) on malformed input.
    """
    ans = []
    found_empty_prons = False
    found_large_pronprobs = False
    # See the comment near the top of this file, RE why we use latin-1.
    with open(filename, 'r', encoding='latin-1') as f:
        whitespace = re.compile("[ \t]+")
        for line in f:
            a = whitespace.split(line.strip(" \t\r\n"))
            if len(a) < 2:
                print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
                    sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
                sys.exit(1)
            word = a[0]
            if word == "<eps>":
                # This would clash with the epsilon symbol normally used in OpenFst.
                # Bug fix: the original omitted sys.argv[0] from the format args,
                # so the offending line was printed where the program name belongs
                # and the message was truncated.
                print("{0}: error: found <eps> as a word in line '{1}' of lexicon file "
                      "{2}".format(sys.argv[0], line.strip(" \t\r\n"), filename),
                      file=sys.stderr)
                sys.exit(1)
            try:
                pron_prob = float(a[1])
            except ValueError:  # narrowed from a bare `except:`
                print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
                      "should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
                      file=sys.stderr)
                sys.exit(1)
            prons = a[2:]
            if pron_prob <= 0.0:
                # Bug fix: '{1}' was used twice in the original format string, so
                # the filename was never printed; it now gets its own slot.
                print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {2} ".format(
                    sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
                sys.exit(1)
            if len(prons) == 0:
                found_empty_prons = True
            ans.append( (word, pron_prob, prons) )
            if pron_prob > 1.0:
                found_large_pronprobs = True
    if found_empty_prons:
        print("{0}: warning: found at least one word with an empty pronunciation "
              "in lexicon file {1}.".format(sys.argv[0], filename),
              file=sys.stderr)
    if found_large_pronprobs:
        print("{0}: warning: found at least one word with pron-prob >1.0 "
              "in {1}".format(sys.argv[0], filename), file=sys.stderr)
    if len(ans) == 0:
        print("{0}: error: found no pronunciations in lexicon file {1}".format(
            sys.argv[0], filename), file=sys.stderr)
        sys.exit(1)
    return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
                           nonterminals, left_context_phones):
    """Write to stdout the extra L.fst arcs needed for grammar decoding
    (see kaldi-asr.org/doc/grammar.html, section "Special symbols in L.fst",
    kaldi-asr.org/doc/grammar.html#grammar_special_l).  Called from
    write_fst_no_silence and write_fst_with_silence.

    start_state: the start-state of L.fst.
    loop_state: the state of high out-degree in L.fst where words leave
       and enter.
    next_state: the number from which this function can start allocating its
       own states.  The updated value of next_state is returned.
    nonterminals: the user-defined nonterminal symbols as a list of
       strings, e.g. ['#nonterm:contact_list', ... ].
    left_context_phones: a list of phones that may appear as left-context,
       e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    def _print_arc(src, dest, phone, word, cost):
        # One FST arc in OpenFst text format: src dest ilabel olabel weight.
        print("{0}\t{1}\t{2}\t{3}\t{4}".format(src, dest, phone, word, cost))

    shared_state = next_state
    final_state = next_state + 1
    next_state += 2
    # Entering a sub-grammar from the very start of the utterance.
    _print_arc(start_state, shared_state, '#nonterm_begin', '#nonterm_begin', 0.0)
    # Entering any user-defined nonterminal from the word-loop state.
    for nonterminal in nonterminals:
        _print_arc(loop_state, shared_state, nonterminal, nonterminal, 0.0)
    # this_cost equals log(len(left_context_phones)); charging it here keeps
    # the FST stochastic (sum-to-one, like an HMM) so weight pushing stays
    # well-behaved.  The grammar-FST code cancels it out when splicing
    # (see CombineArcs()).
    this_cost = -math.log(1.0 / len(left_context_phones))
    for left_context_phone in left_context_phones:
        _print_arc(shared_state, loop_state, left_context_phone, '<eps>', this_cost)
    # Arc from the loop state to a final state, labeled #nonterm_end.
    _print_arc(loop_state, final_state, '#nonterm_end', '#nonterm_end', 0.0)
    print("{0}\t{1}".format(final_state, 0.0))
    return next_state
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
    """Writes the text format of L.fst to the standard output. This version is for
    when --sil-prob=0.0, meaning there is no optional silence allowed.

    'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
       read_lexiconp().
    'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
       is either None, or the user-defined nonterminal symbols as a list of
       strings, e.g. ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding, and must be
       supplied if 'nonterminals' is supplied is either None or a list of
       phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    loop_state = 0   # words enter and leave from here; with no optional
                     # silence this is also the FST's start state.
    next_state = 1   # the next un-allocated state, will be incremented as we go.
    for (word, pronprob, pron) in lexicon:
        cost = -math.log(pronprob)
        cur_state = loop_state
        # All but the last phone: the word label and pron cost go on the
        # first arc, epsilon/zero on the rest.
        for i in range(len(pron) - 1):
            print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
                src=cur_state,
                dest=next_state,
                phone=pron[i],
                word=(word if i == 0 else '<eps>'),
                cost=(cost if i == 0 else 0.0)))
            cur_state = next_state
            next_state += 1
        i = len(pron) - 1 # note: i == -1 if pron is empty.
        print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
            src=cur_state,
            dest=loop_state,
            phone=(pron[i] if i >= 0 else '<eps>'),
            word=(word if i <= 0 else '<eps>'),
            cost=(cost if i <= 0 else 0.0)))
    if nonterminals is not None:
        # Bug fix: the original passed an undefined name `start_state` here,
        # raising NameError whenever nonterminals were supplied.  In this
        # no-silence version the start state *is* the loop state, so pass
        # loop_state for both.
        next_state = write_nonterminal_arcs(
            loop_state, loop_state, next_state,
            nonterminals, left_context_phones)
    print("{state}\t{final_cost}".format(
        state=loop_state,
        final_cost=0.0))
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob != 0.0, meaning there is optional silence
'lexicon' is a list of 3-tuples (word, pron-prob, prons)
as returned by read_lexiconp().
'sil_prob', which is expected to be strictly between 0.. and 1.0, is the
probability of silence
'sil_phone' is the silence phone, e.g. "SIL".
'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding, and must be
supplied if 'nonterminals' is supplied is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
assert sil_prob > 0.0 and sil_prob < 1.0
sil_cost = -math.log(sil_prob)
no_sil_cost = -math.log(1.0 - sil_prob);
start_state = 0
loop_state = 1 # words enter and leave from here
sil_state = 2 # words terminate here when followed by silence; this state
# has a silence transition to loop_state.
next_state = 3 # the next un-allocated state, will be incremented as we go.
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=loop_state,
phone='<eps>', word='<eps>', cost=no_sil_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=sil_state,
phone='<eps>', word='<eps>', cost=sil_cost))
if sil_disambig is None:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=loop_state,
phone=sil_phone, word='<eps>', cost=0.0))
else:
sil_disambig_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=sil_disambig_state,
phone=sil_phone, word='<eps>', cost=0.0))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_disambig_state, dest=loop_state,
phone=sil_disambig, word='<eps>', cost=0.0))
for (word, pronprob, pron) in lexicon:
pron_cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state, dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(pron_cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=no_sil_cost + (pron_cost if i <= 0 else 0.0)))
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=sil_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=sil_cost + (pron_cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
start_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
"""Writes updated words.txt to 'filename'. 'orig_lines' is the original lines
in the words.txt file as a list of strings (without the newlines);
highest_numbered_symbol is the highest numbered symbol in the original
words.txt; nonterminals is a list of strings like '#nonterm:foo'."""
with open(filename, 'w', encoding='latin-1') as f:
for l in orig_lines:
print(l, file=f)
cur_symbol = highest_numbered_symbol + 1
for n in [ '#nonterm_begin', '#nonterm_end' ] + nonterminals:
print("{0} {1}".format(n, cur_symbol), file=f)
cur_symbol = cur_symbol + 1
def read_nonterminals(filename):
"""Reads the user-defined nonterminal symbols in 'filename', checks that
it has the expected format and has no duplicates, and returns the nonterminal
symbols as a list of strings, e.g.
['#nonterm:contact_list', '#nonterm:phone_number', ... ]. """
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
for nonterm in ans:
if nonterm[:9] != '#nonterm:':
raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
.format(filename, nonterm))
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
def read_left_context_phones(filename):
"""Reads, checks, and returns a list of left-context phones, in text form, one
per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"""
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
whitespace = re.compile("[ \t]+")
for s in ans:
if len(whitespace.split(s)) != 1:
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s) )
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
def is_token(s):
"""Returns true if s is a string and is space-free."""
if not isinstance(s, str):
return False
whitespace = re.compile("[ \t\r\n]+")
split_str = whitespace.split(s);
return len(split_str) == 1 and s == split_str[0]
def main():
args = get_args()
lexicon = read_lexiconp(args.lexiconp)
if args.nonterminals is None:
nonterminals, left_context_phones = None, None
else:
if args.left_context_phones is None:
print("{0}: if --nonterminals is specified, --left-context-phones must also "
"be specified".format(sys.argv[0]))
sys.exit(1)
nonterminals = read_nonterminals(args.nonterminals)
left_context_phones = read_left_context_phones(args.left_context_phones)
if args.sil_prob == 0.0:
write_fst_no_silence(lexicon,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
else:
# Do some checking that the options make sense.
if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
print("{0}: invalid value specified --sil-prob={1}".format(
sys.argv[0], args.sil_prob), file=sys.stderr)
sys.exit(1)
if not is_token(args.sil_phone):
print("{0}: you specified --sil-prob={1} but --sil-phone is set "
"to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
file=sys.stderr)
sys.exit(1)
if args.sil_disambig is not None and not is_token(args.sil_disambig):
print("{0}: invalid value --sil-disambig='{1}' was specified."
"".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
sys.exit(1)
write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
args.sil_disambig,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
# (lines, highest_symbol) = read_words_txt(args.input_words_txt)
# nonterminals = read_nonterminals(args.nonterminal_symbols_list)
# write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt)
if __name__ == '__main__':
main()
|
read_left_context_phones
|
Reads, checks, and returns a list of left-context phones, in text form, one
per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]
|
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# see get_args() below for usage message.
import argparse
import os
import sys
import math
import re
# The use of latin-1 encoding does not preclude reading utf-8. latin-1
# encoding means "treat words as sequences of bytes", and it is compatible
# with utf-8 encoding as well as other encodings such as gbk, as long as the
# spaces are also spaces in ascii (which we check). It is basically how we
# emulate the behavior of python before python3.
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
parser = argparse.ArgumentParser(description="""This script creates the
text form of a lexicon FST, to be compiled by fstcompile using the
appropriate symbol tables (phones.txt and words.txt) . It will mostly
be invoked indirectly via utils/prepare_lang.sh. The output goes to
the stdout.""")
parser.add_argument('--sil-phone', dest='sil_phone', type=str,
help="""Text form of optional-silence phone, e.g. 'SIL'. See also
the --silprob option.""")
parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
help="""Probability of silence between words (including at the
beginning and end of word sequences). Must be in the range [0.0, 1.0].
This refers to the optional silence inserted by the lexicon; see
the --silphone option.""")
parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
help="""Disambiguation symbol to disambiguate silence, e.g. #5.
Will only be supplied if you are creating the version of L.fst
with disambiguation symbols, intended for use with cyclic G.fst.
This symbol was introduced to fix a rather obscure source of
nondeterminism of CLG.fst, that has to do with reordering of
disambiguation symbols and phone symbols.""")
parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
help="""Only relevant if --nonterminals is also supplied; this relates
to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
src/doc/grammar.dox). Format is a list of left-context phones,
in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
parser.add_argument('--nonterminals', type=str,
help="""If supplied, --left-context-phones must also be supplied.
List of user-defined nonterminal symbols such as #nonterm:contact_list,
one per line. E.g. data/local/dict/nonterminals.txt.""")
parser.add_argument('lexiconp', type=str,
help="""Filename of lexicon with pronunciation probabilities
(normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
e.g. 'a 1.0 ay'""")
args = parser.parse_args()
return args
def read_lexiconp(filename):
"""Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
'pron_prob', a float, is the pronunciation probability (which must be >0.0
and would normally be <=1.0), and 'pron' is a list of strings representing phones.
An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).
"""
ans = []
found_empty_prons = False
found_large_pronprobs = False
# See the comment near the top of this file, RE why we use latin-1.
with open(filename, 'r', encoding='latin-1') as f:
whitespace = re.compile("[ \t]+")
for line in f:
a = whitespace.split(line.strip(" \t\r\n"))
if len(a) < 2:
print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
word = a[0]
if word == "<eps>":
# This would clash with the epsilon symbol normally used in OpenFst.
print("{0}: error: found <eps> as a word in lexicon file "
"{1}".format(line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
try:
pron_prob = float(a[1])
except:
print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
"should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
file=sys.stderr)
sys.exit(1)
prons = a[2:]
if pron_prob <= 0.0:
print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {1} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
if len(prons) == 0:
found_empty_prons = True
ans.append( (word, pron_prob, prons) )
if pron_prob > 1.0:
found_large_pronprobs = True
if found_empty_prons:
print("{0}: warning: found at least one word with an empty pronunciation "
"in lexicon file {1}.".format(sys.argv[0], filename),
file=sys.stderr)
if found_large_pronprobs:
print("{0}: warning: found at least one word with pron-prob >1.0 "
"in {1}".format(sys.argv[0], filename), file=sys.stderr)
if len(ans) == 0:
print("{0}: error: found no pronunciations in lexicon file {1}".format(
sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
nonterminals, left_context_phones):
"""This function relates to the grammar-decoding setup, see
kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence
and write_fst_silence, and writes to the stdout some extra arcs
in the lexicon FST that relate to nonterminal symbols.
See the section "Special symbols in L.fst,
kaldi-asr.org/doc/grammar.html#grammar_special_l.
start_state: the start-state of L.fst.
loop_state: the state of high out-degree in L.fst where words leave
and enter.
next_state: the number from which this function can start allocating its
own states. the updated value of next_state will be returned.
nonterminals: the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
left_context_phones: a list of phones that may appear as left-context,
e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
shared_state = next_state
next_state += 1
final_state = next_state
next_state += 1
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=start_state, dest=shared_state,
phone='#nonterm_begin', word='#nonterm_begin',
cost=0.0))
for nonterminal in nonterminals:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=shared_state,
phone=nonterminal, word=nonterminal,
cost=0.0))
# this_cost equals log(len(left_context_phones)) but the expression below
# better captures the meaning. Applying this cost to arcs keeps the FST
# stochatic (sum-to-one, like an HMM), so that if we do weight pushing
# things won't get weird. In the grammar-FST code when we splice things
# together we will cancel out this cost, see the function CombineArcs().
this_cost = -math.log(1.0 / len(left_context_phones))
for left_context_phone in left_context_phones:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=shared_state, dest=loop_state,
phone=left_context_phone, word='<eps>', cost=this_cost))
# arc from loop-state to a final-state with #nonterm_end as ilabel and olabel
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=final_state,
phone='#nonterm_end', word='#nonterm_end', cost=0.0))
print("{state}\t{final_cost}".format(
state=final_state, final_cost=0.0))
return next_state
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob=0.0, meaning there is no optional silence allowed.
'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
read_lexiconp().
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding, and must be
supplied if 'nonterminals' is supplied is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
loop_state = 0
next_state = 1 # the next un-allocated state, will be incremented as we go.
for (word, pronprob, pron) in lexicon:
cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=(cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
start_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob != 0.0, meaning there is optional silence
'lexicon' is a list of 3-tuples (word, pron-prob, prons)
as returned by read_lexiconp().
'sil_prob', which is expected to be strictly between 0.. and 1.0, is the
probability of silence
'sil_phone' is the silence phone, e.g. "SIL".
'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding, and must be
supplied if 'nonterminals' is supplied is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
assert sil_prob > 0.0 and sil_prob < 1.0
sil_cost = -math.log(sil_prob)
no_sil_cost = -math.log(1.0 - sil_prob);
start_state = 0
loop_state = 1 # words enter and leave from here
sil_state = 2 # words terminate here when followed by silence; this state
# has a silence transition to loop_state.
next_state = 3 # the next un-allocated state, will be incremented as we go.
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=loop_state,
phone='<eps>', word='<eps>', cost=no_sil_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=sil_state,
phone='<eps>', word='<eps>', cost=sil_cost))
if sil_disambig is None:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=loop_state,
phone=sil_phone, word='<eps>', cost=0.0))
else:
sil_disambig_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=sil_disambig_state,
phone=sil_phone, word='<eps>', cost=0.0))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_disambig_state, dest=loop_state,
phone=sil_disambig, word='<eps>', cost=0.0))
for (word, pronprob, pron) in lexicon:
pron_cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state, dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(pron_cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=no_sil_cost + (pron_cost if i <= 0 else 0.0)))
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=sil_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=sil_cost + (pron_cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
start_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
"""Writes updated words.txt to 'filename'. 'orig_lines' is the original lines
in the words.txt file as a list of strings (without the newlines);
highest_numbered_symbol is the highest numbered symbol in the original
words.txt; nonterminals is a list of strings like '#nonterm:foo'."""
with open(filename, 'w', encoding='latin-1') as f:
for l in orig_lines:
print(l, file=f)
cur_symbol = highest_numbered_symbol + 1
for n in [ '#nonterm_begin', '#nonterm_end' ] + nonterminals:
print("{0} {1}".format(n, cur_symbol), file=f)
cur_symbol = cur_symbol + 1
def read_nonterminals(filename):
"""Reads the user-defined nonterminal symbols in 'filename', checks that
it has the expected format and has no duplicates, and returns the nonterminal
symbols as a list of strings, e.g.
['#nonterm:contact_list', '#nonterm:phone_number', ... ]. """
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
for nonterm in ans:
if nonterm[:9] != '#nonterm:':
raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
.format(filename, nonterm))
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
# MASKED: read_left_context_phones function (lines 338-351)
def is_token(s):
"""Returns true if s is a string and is space-free."""
if not isinstance(s, str):
return False
whitespace = re.compile("[ \t\r\n]+")
split_str = whitespace.split(s);
return len(split_str) == 1 and s == split_str[0]
def main():
args = get_args()
lexicon = read_lexiconp(args.lexiconp)
if args.nonterminals is None:
nonterminals, left_context_phones = None, None
else:
if args.left_context_phones is None:
print("{0}: if --nonterminals is specified, --left-context-phones must also "
"be specified".format(sys.argv[0]))
sys.exit(1)
nonterminals = read_nonterminals(args.nonterminals)
left_context_phones = read_left_context_phones(args.left_context_phones)
if args.sil_prob == 0.0:
write_fst_no_silence(lexicon,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
else:
# Do some checking that the options make sense.
if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
print("{0}: invalid value specified --sil-prob={1}".format(
sys.argv[0], args.sil_prob), file=sys.stderr)
sys.exit(1)
if not is_token(args.sil_phone):
print("{0}: you specified --sil-prob={1} but --sil-phone is set "
"to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
file=sys.stderr)
sys.exit(1)
if args.sil_disambig is not None and not is_token(args.sil_disambig):
print("{0}: invalid value --sil-disambig='{1}' was specified."
"".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
sys.exit(1)
write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
args.sil_disambig,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
# (lines, highest_symbol) = read_words_txt(args.input_words_txt)
# nonterminals = read_nonterminals(args.nonterminal_symbols_list)
# write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt)
if __name__ == '__main__':
main()
|
def read_left_context_phones(filename):
"""Reads, checks, and returns a list of left-context phones, in text form, one
per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"""
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
whitespace = re.compile("[ \t]+")
for s in ans:
if len(whitespace.split(s)) != 1:
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s) )
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
| 338
| 351
|
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# see get_args() below for usage message.
import argparse
import os
import sys
import math
import re
# The use of latin-1 encoding does not preclude reading utf-8. latin-1
# encoding means "treat words as sequences of bytes", and it is compatible
# with utf-8 encoding as well as other encodings such as gbk, as long as the
# spaces are also spaces in ascii (which we check). It is basically how we
# emulate the behavior of python before python3.
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
parser = argparse.ArgumentParser(description="""This script creates the
text form of a lexicon FST, to be compiled by fstcompile using the
appropriate symbol tables (phones.txt and words.txt) . It will mostly
be invoked indirectly via utils/prepare_lang.sh. The output goes to
the stdout.""")
parser.add_argument('--sil-phone', dest='sil_phone', type=str,
help="""Text form of optional-silence phone, e.g. 'SIL'. See also
the --silprob option.""")
parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
help="""Probability of silence between words (including at the
beginning and end of word sequences). Must be in the range [0.0, 1.0].
This refers to the optional silence inserted by the lexicon; see
the --silphone option.""")
parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
help="""Disambiguation symbol to disambiguate silence, e.g. #5.
Will only be supplied if you are creating the version of L.fst
with disambiguation symbols, intended for use with cyclic G.fst.
This symbol was introduced to fix a rather obscure source of
nondeterminism of CLG.fst, that has to do with reordering of
disambiguation symbols and phone symbols.""")
parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
help="""Only relevant if --nonterminals is also supplied; this relates
to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
src/doc/grammar.dox). Format is a list of left-context phones,
in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
parser.add_argument('--nonterminals', type=str,
help="""If supplied, --left-context-phones must also be supplied.
List of user-defined nonterminal symbols such as #nonterm:contact_list,
one per line. E.g. data/local/dict/nonterminals.txt.""")
parser.add_argument('lexiconp', type=str,
help="""Filename of lexicon with pronunciation probabilities
(normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
e.g. 'a 1.0 ay'""")
args = parser.parse_args()
return args
def read_lexiconp(filename):
"""Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
'pron_prob', a float, is the pronunciation probability (which must be >0.0
and would normally be <=1.0), and 'pron' is a list of strings representing phones.
An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).
"""
ans = []
found_empty_prons = False
found_large_pronprobs = False
# See the comment near the top of this file, RE why we use latin-1.
with open(filename, 'r', encoding='latin-1') as f:
whitespace = re.compile("[ \t]+")
for line in f:
a = whitespace.split(line.strip(" \t\r\n"))
if len(a) < 2:
print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
word = a[0]
if word == "<eps>":
# This would clash with the epsilon symbol normally used in OpenFst.
print("{0}: error: found <eps> as a word in lexicon file "
"{1}".format(line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
try:
pron_prob = float(a[1])
except:
print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
"should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
file=sys.stderr)
sys.exit(1)
prons = a[2:]
if pron_prob <= 0.0:
print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {1} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
if len(prons) == 0:
found_empty_prons = True
ans.append( (word, pron_prob, prons) )
if pron_prob > 1.0:
found_large_pronprobs = True
if found_empty_prons:
print("{0}: warning: found at least one word with an empty pronunciation "
"in lexicon file {1}.".format(sys.argv[0], filename),
file=sys.stderr)
if found_large_pronprobs:
print("{0}: warning: found at least one word with pron-prob >1.0 "
"in {1}".format(sys.argv[0], filename), file=sys.stderr)
if len(ans) == 0:
print("{0}: error: found no pronunciations in lexicon file {1}".format(
sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
nonterminals, left_context_phones):
"""This function relates to the grammar-decoding setup, see
kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence
and write_fst_silence, and writes to the stdout some extra arcs
in the lexicon FST that relate to nonterminal symbols.
See the section "Special symbols in L.fst,
kaldi-asr.org/doc/grammar.html#grammar_special_l.
start_state: the start-state of L.fst.
loop_state: the state of high out-degree in L.fst where words leave
and enter.
next_state: the number from which this function can start allocating its
own states. the updated value of next_state will be returned.
nonterminals: the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
left_context_phones: a list of phones that may appear as left-context,
e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
shared_state = next_state
next_state += 1
final_state = next_state
next_state += 1
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=start_state, dest=shared_state,
phone='#nonterm_begin', word='#nonterm_begin',
cost=0.0))
for nonterminal in nonterminals:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=shared_state,
phone=nonterminal, word=nonterminal,
cost=0.0))
# this_cost equals log(len(left_context_phones)) but the expression below
# better captures the meaning. Applying this cost to arcs keeps the FST
# stochatic (sum-to-one, like an HMM), so that if we do weight pushing
# things won't get weird. In the grammar-FST code when we splice things
# together we will cancel out this cost, see the function CombineArcs().
this_cost = -math.log(1.0 / len(left_context_phones))
for left_context_phone in left_context_phones:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=shared_state, dest=loop_state,
phone=left_context_phone, word='<eps>', cost=this_cost))
# arc from loop-state to a final-state with #nonterm_end as ilabel and olabel
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=final_state,
phone='#nonterm_end', word='#nonterm_end', cost=0.0))
print("{state}\t{final_cost}".format(
state=final_state, final_cost=0.0))
return next_state
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob=0.0, meaning there is no optional silence allowed.
'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
read_lexiconp().
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding, and must be
supplied if 'nonterminals' is supplied is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
loop_state = 0
next_state = 1 # the next un-allocated state, will be incremented as we go.
for (word, pronprob, pron) in lexicon:
cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=(cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
start_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
                           nonterminals=None, left_context_phones=None):
    """Writes the text format of L.fst to the standard output.  This version is
    for when --sil-prob != 0.0, meaning there is optional silence.

    'lexicon' is a list of 3-tuples (word, pron-prob, prons)
        as returned by read_lexiconp().
    'sil_prob', which is expected to be strictly between 0.0 and 1.0, is the
        probability of silence.
    'sil_phone' is the silence phone, e.g. "SIL".
    'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
    'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
        is either None, or the user-defined nonterminal symbols as a list of
        strings, e.g. ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding, and must be
        supplied if 'nonterminals' is supplied, is either None or a list of
        phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    assert sil_prob > 0.0 and sil_prob < 1.0
    sil_cost = -math.log(sil_prob)
    no_sil_cost = -math.log(1.0 - sil_prob);
    start_state = 0
    loop_state = 1  # words enter and leave from here
    sil_state = 2  # words terminate here when followed by silence; this state
                   # has a silence transition to loop_state.
    next_state = 3  # the next un-allocated state, will be incremented as we go.

    # Two epsilon arcs out of the start state: one straight into the loop
    # (no initial silence, cost no_sil_cost) ...
    print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
        src=start_state, dest=loop_state,
        phone='<eps>', word='<eps>', cost=no_sil_cost))
    # ... and one into the silence state (optional initial silence, cost sil_cost).
    print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
        src=start_state, dest=sil_state,
        phone='<eps>', word='<eps>', cost=sil_cost))
    if sil_disambig is None:
        print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
            src=sil_state, dest=loop_state,
            phone=sil_phone, word='<eps>', cost=0.0))
    else:
        # Route the silence phone through an extra state so that the
        # disambiguation symbol can be emitted right after it.
        sil_disambig_state = next_state
        next_state += 1
        print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
            src=sil_state, dest=sil_disambig_state,
            phone=sil_phone, word='<eps>', cost=0.0))
        print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
            src=sil_disambig_state, dest=loop_state,
            phone=sil_disambig, word='<eps>', cost=0.0))

    for (word, pronprob, pron) in lexicon:
        pron_cost = -math.log(pronprob)
        cur_state = loop_state
        # Arcs for all phones but the last: the word label and the pron-prob
        # cost go on the first arc only.
        for i in range(len(pron) - 1):
            print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
                src=cur_state, dest=next_state,
                phone=pron[i],
                word=(word if i == 0 else '<eps>'),
                cost=(pron_cost if i == 0 else 0.0)))
            cur_state = next_state
            next_state += 1

        i = len(pron) - 1  # note: i == -1 if pron is empty.
        # Last phone: one arc back to the loop (word not followed by silence) ...
        print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
            src=cur_state,
            dest=loop_state,
            phone=(pron[i] if i >= 0 else '<eps>'),
            word=(word if i <= 0 else '<eps>'),
            cost=no_sil_cost + (pron_cost if i <= 0 else 0.0)))
        # ... and one into the silence state (word followed by optional silence).
        print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
            src=cur_state,
            dest=sil_state,
            phone=(pron[i] if i >= 0 else '<eps>'),
            word=(word if i <= 0 else '<eps>'),
            cost=sil_cost + (pron_cost if i <= 0 else 0.0)))

    if nonterminals is not None:
        next_state = write_nonterminal_arcs(
            start_state, loop_state, next_state,
            nonterminals, left_context_phones)

    print("{state}\t{final_cost}".format(
        state=loop_state,
        final_cost=0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
    """Writes updated words.txt to 'filename'.  'orig_lines' is the original lines
    in the words.txt file as a list of strings (without the newlines);
    highest_numbered_symbol is the highest numbered symbol in the original
    words.txt; nonterminals is a list of strings like '#nonterm:foo'."""
    # The grammar-decoding control symbols come first, then the user-defined
    # nonterminals, numbered consecutively after the existing symbols.
    added_symbols = ['#nonterm_begin', '#nonterm_end'] + nonterminals
    with open(filename, 'w', encoding='latin-1') as f:
        for line in orig_lines:
            f.write(line + '\n')
        for offset, symbol in enumerate(added_symbols):
            f.write('{0} {1}\n'.format(symbol, highest_numbered_symbol + 1 + offset))
def read_nonterminals(filename):
    """Reads the user-defined nonterminal symbols in 'filename', checks that
    it has the expected format and has no duplicates, and returns the nonterminal
    symbols as a list of strings, e.g.
    ['#nonterm:contact_list', '#nonterm:phone_number', ... ]."""
    # Use a context manager so the file handle is closed deterministically
    # (the original opened the file inside a comprehension and leaked it).
    with open(filename, 'r', encoding='latin-1') as f:
        ans = [line.strip(" \t\r\n") for line in f]
    if len(ans) == 0:
        raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
    for nonterm in ans:
        # Every entry must carry the literal '#nonterm:' prefix.
        if nonterm[:9] != '#nonterm:':
            raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
                               .format(filename, nonterm))
    if len(set(ans)) != len(ans):
        raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
    return ans
def read_left_context_phones(filename):
    """Reads, checks, and returns a list of left-context phones, in text form, one
    per line.  Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"""
    # Context manager so the file handle is closed deterministically
    # (the original opened the file inside a comprehension and leaked it).
    with open(filename, 'r', encoding='latin-1') as f:
        ans = [line.strip(" \t\r\n") for line in f]
    if len(ans) == 0:
        raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
    whitespace = re.compile("[ \t]+")
    for s in ans:
        # Each line must contain exactly one whitespace-free token.
        if len(whitespace.split(s)) != 1:
            raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s))
    if len(set(ans)) != len(ans):
        # BUGFIX: the message used to say "Duplicate nonterminal symbols",
        # copy-pasted from read_nonterminals; this function checks phones.
        raise RuntimeError("Duplicate left-context phones are present in file {0}".format(filename))
    return ans
def is_token(s):
    """Returns true if s is a string and is space-free."""
    if not isinstance(s, str):
        return False
    # A token contains none of space, tab, CR or LF.  Note that the empty
    # string qualifies, matching the regex-split behaviour this replaces.
    return re.search("[ \t\r\n]", s) is None
def main():
    # Entry point: parse options, read the lexicon, and print L.fst (text
    # format) to stdout, with or without optional silence.
    args = get_args()
    lexicon = read_lexiconp(args.lexiconp)
    if args.nonterminals is None:
        nonterminals, left_context_phones = None, None
    else:
        # Grammar decoding: --left-context-phones is mandatory alongside
        # --nonterminals.
        if args.left_context_phones is None:
            print("{0}: if --nonterminals is specified, --left-context-phones must also "
                  "be specified".format(sys.argv[0]))
            sys.exit(1)
        nonterminals = read_nonterminals(args.nonterminals)
        left_context_phones = read_left_context_phones(args.left_context_phones)
    if args.sil_prob == 0.0:
        write_fst_no_silence(lexicon,
                             nonterminals=nonterminals,
                             left_context_phones=left_context_phones)
    else:
        # Do some checking that the options make sense.
        if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
            print("{0}: invalid value specified --sil-prob={1}".format(
                sys.argv[0], args.sil_prob), file=sys.stderr)
            sys.exit(1)
        if not is_token(args.sil_phone):
            print("{0}: you specified --sil-prob={1} but --sil-phone is set "
                  "to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
                  file=sys.stderr)
            sys.exit(1)
        if args.sil_disambig is not None and not is_token(args.sil_disambig):
            print("{0}: invalid value --sil-disambig='{1}' was specified."
                  "".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
            sys.exit(1)
        write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
                               args.sil_disambig,
                               nonterminals=nonterminals,
                               left_context_phones=left_context_phones)

    # (lines, highest_symbol) = read_words_txt(args.input_words_txt)
    # nonterminals = read_nonterminals(args.nonterminal_symbols_list)
    # write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt)


if __name__ == '__main__':
    main()
|
inspect_app
|
Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
# MASKED: inspect_app function (lines 24-41)
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    # Dispatch to the inspection function registered for this router class.
    registered_fn = _supported_routers.get(type(router))
    if registered_fn is not None:
        return registered_fn(router)
    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def _register(fn):
        # Refuse to silently overwrite an existing registration.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = fn
        return fn

    return _register
# Router inspection registry: maps a router class to the function able to
# inspect instances of it (populated via register_router).
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
        been added to the application.
    """
    # Each entry of app._static_routes is a 3-tuple whose first element is
    # the static route object itself.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    collected = []
    for prefix, sink, _ in app._sinks:
        # prefix is a compiled regex; report its original pattern string.
        source_info, name = _get_source_info_and_name(sink)
        collected.append(SinkInfo(prefix.pattern, name, source_info))
    return collected
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
        application.
    """
    handlers = []
    for exc, fn in app._error_handlers.items():
        source_info, name = _get_source_info_and_name(fn)
        handlers.append(
            ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # Prepare all middleware at once to obtain the ordered method stacks
    # (request, resource, response) exactly as the app will execute them.
    types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)

    type_infos = []
    for stack in types_:
        current = []
        for method in stack:
            _, name = _get_source_info_and_name(method)
            # Bound methods: recover the owning class for display purposes.
            cls = type(method.__self__)
            _, cls_name = _get_source_info_and_name(cls)
            current.append(MiddlewareTreeItemInfo(name, cls_name))
        type_infos.append(current)
    middlewareTree = MiddlewareTreeInfo(*type_infos)

    middlewareClasses = []
    names = 'Process request', 'Process resource', 'Process response'
    for m in app._unprepared_middleware:
        # Prepare each middleware individually to discover which of the
        # three process_* methods this particular class implements.
        fns = app_helpers.prepare_middleware([m], True, app._ASGI)
        class_source_info, cls_name = _get_source_info_and_name(type(m))
        methods = []
        for method, name in zip(fns, names):
            if method:
                real_func = method[0]
                source_info = _get_source_info(real_func)
                methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
        m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
        middlewareClasses.append(m_info)

    return MiddlewareInfo(
        middlewareTree, middlewareClasses, app._independent_middleware
    )
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """

    def _traverse(roots, parent):
        # Depth-first walk over the router node tree; accumulates into the
        # enclosing `routes` list.
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Responders may be wrapped in functools.partial;
                        # unwrap to reach the real function for inspection.
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func

                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)

                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)

                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)

            if root.children:
                _traverse(root.children, path)

    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    # Base class for all info classes; __visit_name__ selects the
    # StringVisitor method used to render the instance.
    __visit_name__ = 'N/A'

    def to_string(self, verbose=False, internal=False) -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.

        Returns:
            str: string representation of this class.
        """
        visitor = StringVisitor(verbose, internal)
        return visitor.process(self)

    def __repr__(self):
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
        # e.g. 'on_get_collection' -> 'collection', 'on_get' -> ''.
        name_parts = function_name.split('_')
        self.suffix = (
            '_'.join(name_parts[2:]) if function_name.startswith('on') else ''
        )
class RouteInfo(_Traversable):
    """Describes a route.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.path = path
        self.class_name = class_name
        self.source_info = source_info
        self.methods = methods
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.prefix = prefix
        self.name = name
        self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.function_name = function_name
        self.source_info = source_info
        self.internal = False  # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the
            middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.name = name
        self.source_info = source_info
        self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrow glyph used by StringVisitor for each middleware phase.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.name = name
        self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.request = request
        self.resource = resource
        self.response = response
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes
            of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        if independent:
            self.independent_text = 'Middleware are independent'
        else:
            self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
    """Describes an application.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.routes = routes
        self.middleware = middleware
        self.static_routes = static_routes
        self.sinks = sinks
        self.error_handlers = error_handlers
        self.asgi = asgi

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. When empty, the visitor falls back to
                ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.
        """
        try:
            visit = getattr(self, 'visit_{}'.format(instance.__visit_name__))
            return visit(instance)
        except AttributeError as ex:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from ex
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """

    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation level (number of spaces); mutated while
        # traversing nested structures.
        self.indent = 0

    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent

    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text

    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Tree-style connectors: all but the last entry get a tee, the last
        # one gets an elbow.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)

    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)

        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text

        return '{}:\n{}'.format(text, method_text)

    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text

    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text

    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit a ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text

    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text

    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)

        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text

        return '{}:\n{}'.format(text, method_text)

    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)

    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)

        if before + after == 0:
            return ''

        # Render the request/resource methods with increasing indent, then
        # the responder marker, then the response methods with decreasing
        # indent, so the output visualizes the nesting of the middleware.
        each = 2
        initial = self.indent
        if after > before:
            # More response methods than request+resource: start deeper so
            # unwinding the response side does not go below the start column.
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))

        self.indent = initial
        return '\n'.join(text)

    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text

    def visit_app(self, app: AppInfo) -> str:
        """Visit a AppInfo instance. Usually called by `process`."""

        type_ = 'ASGI' if app.asgi else 'WSGI'
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)

        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)

        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )

        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)

        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)

        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)

        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
    """Try to get the definition file and line of obj.

    Return default on error.
    """
    try:
        return '{}:{}'.format(
            inspect.getsourcefile(obj), inspect.findsource(obj)[1]
        )
    except Exception:
        # NOTE(vytas): If Falcon is cythonized, all default
        # responders coming from cythonized modules will
        # appear as built-in functions, and raise a
        # TypeError when trying to locate the source file.
        return default
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    location = _get_source_info(obj, None)
    if location is None:
        # NOTE(caselit): class instances return None; fall back to the type.
        location = _get_source_info(type(obj))
    name = getattr(obj, '__name__', None)
    if name is None:
        name = getattr(type(obj), '__name__', '[unknown]')
    return location, name
def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
    module = inspect.getmodule(obj)
    # Objects without a resolvable module are treated as non-internal.
    return module is not None and module.__name__.startswith('falcon.')
def _filter_internal(iterable, return_internal):
    """Filter the internal elements of an iterable."""
    if return_internal:
        # Caller asked for everything, including framework-added entries.
        return iterable
    return [entry for entry in iterable if not entry.internal]
|
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
        :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
        representation.
    """
    # Gather each aspect of the app with its dedicated inspector; all of
    # them only read state, so the ordering is immaterial.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
| 24
| 41
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
        :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
        representation.
    """
    # Gather each aspect via its dedicated, read-only inspector.
    routes = inspect_routes(app)
    static = inspect_static_routes(app)
    sinks = inspect_sinks(app)
    error_handlers = inspect_error_handlers(app)
    middleware = inspect_middlewares(app)
    return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router

    # Dispatch to the inspection function registered for this router class.
    inspect_function = _supported_routers.get(type(router))
    if inspect_function is None:
        raise TypeError(
            'Unsupported router class {}. Use "register_router" '
            'to register a function that can inspect the router '
            'used by the provided application'.format(type(router))
        )
    return inspect_function(router)
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def wraps(fn):
        # Refuse to silently overwrite an existing registration.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = fn
        return fn

    return wraps
# router inspection registry
# Maps each supported router class to the function registered (via
# @register_router) that knows how to inspect instances of that class.
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspect the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    # Each entry of _static_routes is a (route, asgi, fallback) triple;
    # only the route object itself is needed here.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspect the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """

    def _describe(compiled_prefix, sink):
        # The stored prefix is a compiled regex; report its pattern text.
        source_info, name = _get_source_info_and_name(sink)
        return SinkInfo(compiled_prefix.pattern, name, source_info)

    return [_describe(prefix, sink) for prefix, sink, _ in app._sinks]
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspect the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    handlers = []
    for exc, fn in app._error_handlers.items():
        source_info, name = _get_source_info_and_name(fn)
        handlers.append(
            ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # First pass: prepare ALL middleware together to obtain the three
    # execution stacks (request, resource, response) in the order the
    # framework will actually run them.
    types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
    type_infos = []
    for stack in types_:
        current = []
        for method in stack:
            _, name = _get_source_info_and_name(method)
            # prepare_middleware returns bound methods; recover the class
            # each one belongs to for display purposes.
            cls = type(method.__self__)
            _, cls_name = _get_source_info_and_name(cls)
            current.append(MiddlewareTreeItemInfo(name, cls_name))
        type_infos.append(current)
    middlewareTree = MiddlewareTreeInfo(*type_infos)
    middlewareClasses = []
    names = 'Process request', 'Process resource', 'Process response'
    # Second pass: prepare each middleware object in isolation so its
    # methods can be grouped per class rather than per execution stack.
    for m in app._unprepared_middleware:
        fns = app_helpers.prepare_middleware([m], True, app._ASGI)
        class_source_info, cls_name = _get_source_info_and_name(type(m))
        methods = []
        for method, name in zip(fns, names):
            if method:
                # Each stack holds at most the single method of this class.
                real_func = method[0]
                source_info = _get_source_info(real_func)
                methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
        m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
        middlewareClasses.append(m_info)
    return MiddlewareInfo(
        middlewareTree, middlewareClasses, app._independent_middleware
    )
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """

    def _traverse(roots, parent):
        # Depth-first walk of the router's node tree, appending one
        # RouteInfo (to the enclosing ``routes`` list) for every node
        # that has a resource attached.
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Responders may be wrapped in functools.partial
                        # (e.g. suffixed responders); unwrap to reach the
                        # real function for source information.
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func
                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)
                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)
                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)
            if root.children:
                _traverse(root.children, path)

    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    # Name used by InspectVisitor.process to dispatch to a visit method.
    __visit_name__ = 'N/A'

    def to_string(self, verbose=False, internal=False) -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.

        Returns:
            str: string representation of this class.
        """
        visitor = StringVisitor(verbose, internal)
        return visitor.process(self)

    def __repr__(self):
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # Internal falcon responders do not start with "on" and carry no
        # suffix; user responders are named on_<verb>[_<suffix>].
        if not function_name.startswith('on'):
            self.suffix = ''
        else:
            self.suffix = '_'.join(function_name.split('_')[2:])
class RouteInfo(_Traversable):
    """Describes a route.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.methods = methods
        self.source_info = source_info
        self.class_name = class_name
        self.path = path
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The URL prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.source_info = source_info
        self.name = name
        self.prefix = prefix
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        # Middleware methods are never framework-internal; the flag exists
        # only so these can be filtered uniformly with RouteMethodInfo.
        self.internal = False
        self.function_name = function_name
        self.source_info = source_info
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the
            middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.methods = methods
        self.source_info = source_info
        self.name = name
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrow glyph used by StringVisitor for each middleware phase.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.class_name = class_name
        self.name = name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.response = response
        self.resource = resource
        self.request = request
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes
            of the app.
        independent (bool): Whether or not the middleware components are
            executed independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        self.independent_text = (
            'Middleware are independent' if independent else 'Middleware are dependent'
        )
class AppInfo(_Traversable):
    """Describes an application.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.asgi = asgi
        self.routes = routes
        self.middleware = middleware
        self.static_routes = static_routes
        self.sinks = sinks
        self.error_handlers = error_handlers

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        visitor = StringVisitor(verbose, internal, name)
        return visitor.process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: '_Traversable'):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.

        Raises:
            RuntimeError: If this visitor defines no ``visit_<name>`` method
                for the instance's ``__visit_name__``.
        """
        try:
            # Look up the visit method first, so that an AttributeError
            # raised *inside* a visit method is not misreported as an
            # unsupported instance type.
            visit = getattr(self, 'visit_{}'.format(instance.__visit_name__))
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
        return visit(instance)
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """

    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation, in spaces. Mutated while visiting nested
        # structures and restored by the methods that change it.
        self.indent = 0

    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent

    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text

    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Box-drawing connectors: '├──' for every entry but the last, '└──'
        # for the last one.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)

    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text

    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text

    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit a ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text

    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text

    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        # Fall back to the generic arrow for unrecognized method names.
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)

    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # The tree is rendered as a "V" shape: request/resource entries step
        # right by `each` spaces, the route responder sits at the deepest
        # point, and response entries step back left.
        each = 2
        initial = self.indent
        if after > before:
            # The response side is longer; start the request side deeper so
            # both sides still meet the responder at the same depth.
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        # Restore the caller's indentation.
        self.indent = initial
        return '\n'.join(text)

    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text

    def visit_app(self, app: AppInfo) -> str:
        """Visit a AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    source_info = _get_source_info(obj, None)
    if source_info is None:
        # NOTE(caselit): class instances return None. Try the type instead.
        source_info = _get_source_info(type(obj))
    name = getattr(obj, '__name__', None)
    if name is None:
        # Instances have no __name__; fall back to the name of their type.
        name = getattr(type(obj), '__name__', '[unknown]')
    return source_info, name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
inspect_routes
|
Inspects the routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[RouteInfo]: A list of route descriptions for the application.
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
"""Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
"""
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middlewares(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    # Dispatch on the router's concrete class via the registry populated
    # by @register_router.
    inspect_function = _supported_routers.get(type(router))
    if inspect_function is None:
        raise TypeError(
            'Unsupported router class {}. Use "register_router" '
            'to register a function that can inspect the router '
            'used by the provided application'.format(type(router))
        )
    return inspect_function(router)
def register_router(router_class):
"""Register a function to inspect a particular router.
This decorator registers a new function for a custom router
class, so that it can be inspected with the function
:func:`.inspect_routes`.
An inspection function takes the router instance used by the
application and returns a list of :class:`.RouteInfo`. Eg::
@register_router(MyRouterClass)
def inspect_my_router(router):
return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]
Args:
router_class (Type): The router class to register. If
already registered an error will be raised.
"""
def wraps(fn):
if router_class in _supported_routers:
raise ValueError(
'Another function is already registered'
' for the router {}'.format(router_class)
)
_supported_routers[router_class] = fn
return fn
return wraps
# router inspection registry
_supported_routers = {} # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
"""Inspects the static routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[StaticRouteInfo]: A list of static routes that have
been added to the application.
"""
routes = []
for sr, _, _ in app._static_routes:
info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
routes.append(info)
return routes
def inspect_sinks(app: App) -> 'List[SinkInfo]':
"""Inspects the sinks of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[SinkInfo]: A list of sinks used by the application.
"""
sinks = []
for prefix, sink, _ in app._sinks:
source_info, name = _get_source_info_and_name(sink)
info = SinkInfo(prefix.pattern, name, source_info)
sinks.append(info)
return sinks
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
"""Inspects the error handlers of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[ErrorHandlerInfo]: A list of error handlers used by the
application.
"""
errors = []
for exc, fn in app._error_handlers.items():
source_info, name = _get_source_info_and_name(fn)
info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
errors.append(info)
return errors
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
"""Inspects the middleware components of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
MiddlewareInfo: Information about the app's middleware components.
"""
types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
type_infos = []
for stack in types_:
current = []
for method in stack:
_, name = _get_source_info_and_name(method)
cls = type(method.__self__)
_, cls_name = _get_source_info_and_name(cls)
current.append(MiddlewareTreeItemInfo(name, cls_name))
type_infos.append(current)
middlewareTree = MiddlewareTreeInfo(*type_infos)
middlewareClasses = []
names = 'Process request', 'Process resource', 'Process response'
for m in app._unprepared_middleware:
fns = app_helpers.prepare_middleware([m], True, app._ASGI)
class_source_info, cls_name = _get_source_info_and_name(type(m))
methods = []
for method, name in zip(fns, names):
if method:
real_func = method[0]
source_info = _get_source_info(real_func)
methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
middlewareClasses.append(m_info)
return MiddlewareInfo(
middlewareTree, middlewareClasses, app._independent_middleware
)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
"""Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
"""
def _traverse(roots, parent):
for root in roots:
path = parent + '/' + root.raw_segment
if root.resource is not None:
methods = []
if root.method_map:
for method, func in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(
method, source_info, real_func.__name__, internal
)
methods.append(method_info)
source_info, class_name = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = [] # type: List[RouteInfo]
_traverse(router._roots, '')
return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
"""Describes a responder method.
Args:
method (str): The HTTP method of this responder.
source_info (str): The source path of this function.
function_name (str): Name of the function.
internal (bool): Whether or not this was a default responder added
by the framework.
Attributes:
suffix (str): The suffix of this route function. This is set to an empty
string when the function has no suffix.
"""
__visit_name__ = 'route_method'
def __init__(
self, method: str, source_info: str, function_name: str, internal: bool
):
self.method = method
self.source_info = source_info
self.function_name = function_name
self.internal = internal
# NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
if function_name.startswith('on'):
self.suffix = '_'.join(function_name.split('_')[2:])
else:
self.suffix = ''
class RouteInfo(_Traversable):
"""Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (List[RouteMethodInfo]): List of methods defined in the route.
"""
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: List[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
class StaticRouteInfo(_Traversable):
"""Describes a static route.
Args:
path (str): The prefix of the static route.
directory (str): The directory for the static route.
fallback_filename (str or None): Fallback filename to serve.
"""
__visit_name__ = 'static_route'
def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
self.prefix = prefix
self.directory = directory
self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
"""Describes a sink.
Args:
prefix (str): The prefix of the sink.
name (str): The name of the sink function or class.
source_info (str): The source path where this sink was defined.
"""
__visit_name__ = 'sink'
def __init__(self, prefix: str, name: str, source_info: str):
self.prefix = prefix
self.name = name
self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
"""Desribes an error handler.
Args:
error (name): The name of the error type.
name (str): The name of the handler.
source_info (str): The source path where this error handler was defined.
internal (bool): Whether or not this is a default error handler added by
the framework.
"""
__visit_name__ = 'error_handler'
def __init__(self, error: str, name: str, source_info: str, internal: bool):
self.error = error
self.name = name
self.source_info = source_info
self.internal = internal
class MiddlewareMethodInfo(_Traversable):
"""Describes a middleware method.
Args:
function_name (str): Name of the method.
source_info (str): The source path of the method.
"""
__visit_name__ = 'middleware_method'
def __init__(self, function_name: str, source_info: str):
self.function_name = function_name
self.source_info = source_info
self.internal = False # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
"""Describes a middleware class.
Args:
name (str): The name of the middleware class.
source_info (str): The source path where the middleware was defined.
methods (List[MiddlewareMethodInfo]): List of method defined by the middleware class.
"""
__visit_name__ = 'middleware_class'
def __init__(
self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
):
self.name = name
self.source_info = source_info
self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
"""Describes a middleware tree entry.
Args:
name (str): The name of the method.
class_name (str): The class name of the method.
"""
__visit_name__ = 'middleware_tree_item'
_symbols = {
'process_request': '→',
'process_resource': '↣',
'process_response': '↢',
}
def __init__(self, name: str, class_name: str):
self.name = name
self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
"""Describes the middleware methods used by the app.
Args:
request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
"""
__visit_name__ = 'middleware_tree'
def __init__(
self,
request: List[MiddlewareTreeItemInfo],
resource: List[MiddlewareTreeItemInfo],
response: List[MiddlewareTreeItemInfo],
):
self.request = request
self.resource = resource
self.response = response
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.
    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes
            of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.
    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """
    __visit_name__ = 'middleware'
    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Pre-render the human-readable description of the `independent` flag.
        if independent:
            self.independent_text = 'Middleware are independent'
        else:
            self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
    """Describes an application.
    Returned by ``inspect_app``.
    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """
    __visit_name__ = 'app'
    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.routes = routes
        self.middleware = middleware
        self.static_routes = static_routes
        self.sinks = sinks
        self.error_handlers = error_handlers
        self.asgi = asgi
    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.
        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.
        Returns:
            str: A string representation of the application.
        """
        return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.
    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """
    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.
        Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
        Args:
            instance (_Traversable): The instance to process.
        Raises:
            RuntimeError: If this visitor has no ``visit_<name>`` method for
                the instance.
        """
        try:
            return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
        except AttributeError as e:
            # NOTE(review): because the visit call happens inside the try block,
            # an AttributeError raised *within* a visit_* method is also
            # converted into this RuntimeError.
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.
    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.
    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """
    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current left margin, in spaces; mutated while walking the tree.
        self.indent = 0
    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent
    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text
    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Box-drawing glyphs: '├──' for all but the last entry, '└──' for the last.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)
    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text
    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text
    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit an ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text
    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text
    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Indent step added (request/resource) or removed (response) per entry.
        each = 2
        initial = self.indent
        # Pre-indent when there are more response methods than request/resource
        # ones, so decrementing by `each` per response entry never goes past
        # the initial margin.
        if after > before:
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        self.indent = initial
        return '\n'.join(text)
    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            # The class list is only rendered in verbose mode, nested under the tree.
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text
    def visit_app(self, app: AppInfo) -> str:
        """Visit an AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        # Reset the margin for every app rendered by this visitor.
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
    """Try to get the definition file and line of obj.
    Return default on error.
    """
    try:
        # NOTE(vytas): If Falcon is cythonized, all default
        # responders coming from cythonized modules will
        # appear as built-in functions, and raise a
        # TypeError when trying to locate the source file.
        return '{}:{}'.format(
            inspect.getsourcefile(obj), inspect.findsource(obj)[1]
        )
    except Exception:
        return default
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    source_info = _get_source_info(obj, None)
    if source_info is None:
        # NOTE(caselit): a class instances return None. Try the type
        source_info = _get_source_info(type(obj))
    name = getattr(obj, '__name__', None)
    # Instances usually lack __name__; fall back to their type's name.
    name = name if name is not None else getattr(type(obj), '__name__', '[unknown]')
    return source_info, name
def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
    module = inspect.getmodule(obj)
    # No module means we cannot tell; treat it as not internal.
    return module is not None and module.__name__.startswith('falcon.')
def _filter_internal(iterable, return_internal):
    """Filter the internal elements of an iterable."""
    if not return_internal:
        return [element for element in iterable if not element.internal]
    return iterable
|
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspect the routes of an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    # Dispatch to the inspector registered for this router class, if any.
    handler = _supported_routers.get(type(router))
    if handler is not None:
        return handler(router)
    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
| 44
| 63
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspect an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        AppInfo: The information regarding the application. Call
            :meth:`~.AppInfo.to_string` on the result to obtain a
            human-friendly representation.
    """
    # Gather every facet of the app, then bundle them into a single AppInfo.
    route_list = inspect_routes(app)
    static_list = inspect_static_routes(app)
    sink_list = inspect_sinks(app)
    handler_list = inspect_error_handlers(app)
    middleware_info = inspect_middlewares(app)
    return AppInfo(
        route_list, middleware_info, static_list, sink_list, handler_list, app._ASGI
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspect the routes of an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    # Dispatch to the inspector registered for this router class, if any.
    handler = _supported_routers.get(type(router))
    if handler is not None:
        return handler(router)
    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
def register_router(router_class):
    """Register a function to inspect a particular router.
    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.
    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::
        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]
    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """
    def _decorator(fn):
        # Refuse double registration to avoid silently replacing an inspector.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = fn
        return fn
    return _decorator
# Router inspection registry: maps a router class to the function able to
# inspect it. Populated via the ``register_router`` decorator.
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspect the static routes of an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    # Each entry of app._static_routes is a triple; only the first element
    # (the static route object itself) is needed here.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspect the sinks of an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    collected = []
    # app._sinks holds (compiled_prefix, sink_callable, ...) triples.
    for compiled_prefix, sink, _ in app._sinks:
        where, sink_name = _get_source_info_and_name(sink)
        collected.append(SinkInfo(compiled_prefix.pattern, sink_name, where))
    return collected
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspect the error handlers of an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    infos = []
    for exc_type, handler in app._error_handlers.items():
        where, handler_name = _get_source_info_and_name(handler)
        infos.append(
            ErrorHandlerInfo(
                exc_type.__name__, handler_name, where, _is_internal(handler)
            )
        )
    return infos
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspect the middleware components of an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    prepared_stacks = app_helpers.prepare_middleware(
        app._unprepared_middleware, True, app._ASGI
    )
    # Build one MiddlewareTreeItemInfo list per processing stage
    # (request / resource / response).
    stage_items = []
    for stack in prepared_stacks:
        items = []
        for bound_method in stack:
            _, method_name = _get_source_info_and_name(bound_method)
            owner_cls = type(bound_method.__self__)
            _, owner_name = _get_source_info_and_name(owner_cls)
            items.append(MiddlewareTreeItemInfo(method_name, owner_name))
        stage_items.append(items)
    tree = MiddlewareTreeInfo(*stage_items)
    # Now describe each middleware component class on its own.
    labels = 'Process request', 'Process resource', 'Process response'
    class_infos = []
    for component in app._unprepared_middleware:
        stages = app_helpers.prepare_middleware([component], True, app._ASGI)
        cls_source, cls_name = _get_source_info_and_name(type(component))
        method_infos = []
        for stage, _label in zip(stages, labels):
            if stage:
                fn = stage[0]
                method_infos.append(
                    MiddlewareMethodInfo(fn.__name__, _get_source_info(fn))
                )
        class_infos.append(MiddlewareClassInfo(cls_name, cls_source, method_infos))
    return MiddlewareInfo(tree, class_infos, app._independent_middleware)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
    Default route inspector for CompiledRouter.
    Args:
        router (CompiledRouter): The router to inspect.
    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """
    collected = []  # type: List[RouteInfo]
    def _walk(nodes, prefix):
        # Depth-first traversal of the router tree, accumulating the URI
        # template as segments are visited.
        for node in nodes:
            path = prefix + '/' + node.raw_segment
            if node.resource is not None:
                method_infos = []
                if node.method_map:
                    for verb, responder in node.method_map.items():
                        # Suffixed responders are wrapped in functools.partial.
                        target = responder.func if isinstance(responder, partial) else responder
                        method_infos.append(
                            RouteMethodInfo(
                                verb,
                                _get_source_info(target),
                                target.__name__,
                                _is_internal(target),
                            )
                        )
                src, resource_cls_name = _get_source_info_and_name(node.resource)
                collected.append(RouteInfo(path, resource_cls_name, src, method_infos))
            if node.children:
                _walk(node.children, path)
    _walk(router._roots, '')
    return collected
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    """Base class of all the inspect info types.
    Subclasses set ``__visit_name__`` so that an ``InspectVisitor`` can
    dispatch them to the matching ``visit_<name>`` method.
    """
    # Placeholder; every concrete subclass overrides this.
    __visit_name__ = 'N/A'
    def to_string(self, verbose=False, internal=False) -> str:
        """Return a string representation of this class.
        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.
        Returns:
            str: string representation of this class.
        """
        return StringVisitor(verbose, internal).process(self)
    def __repr__(self):
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.
    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.
    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """
    __visit_name__ = 'route_method'
    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
        if function_name.startswith('on'):
            # 'on_<verb>_<suffix>' -> '<suffix>'; a plain 'on_<verb>' yields ''
            # because the [2:] slice skips the 'on' and verb components.
            self.suffix = '_'.join(function_name.split('_')[2:])
        else:
            self.suffix = ''
class RouteInfo(_Traversable):
    """Describes a route.
    Instances are produced by the router inspectors used by ``inspect_routes``.
    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """
    __visit_name__ = 'route'
    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.path = path
        self.class_name = class_name
        self.source_info = source_info
        self.methods = methods
class StaticRouteInfo(_Traversable):
    """Describes a static route.
    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """
    __visit_name__ = 'static_route'
    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.
    Args:
        prefix (str): The prefix of the sink (the pattern string, as collected
            by ``inspect_sinks``).
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """
    __visit_name__ = 'sink'
    def __init__(self, prefix: str, name: str, source_info: str):
        self.prefix = prefix
        self.name = name
        self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.
    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """
    __visit_name__ = 'error_handler'
    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.
    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """
    __visit_name__ = 'middleware_method'
    def __init__(self, function_name: str, source_info: str):
        self.function_name = function_name
        self.source_info = source_info
        # Always False; present only so ``_filter_internal`` can treat these
        # uniformly with RouteMethodInfo.
        self.internal = False  # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.
    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the
            middleware class.
    """
    __visit_name__ = 'middleware_class'
    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.name = name
        self.source_info = source_info
        self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.
    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """
    __visit_name__ = 'middleware_tree_item'
    # Arrow glyphs used to render each well-known middleware method name;
    # unrecognized names fall back to '→' (see StringVisitor.visit_middleware_tree_item).
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }
    def __init__(self, name: str, class_name: str):
        self.name = name
        self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.
    Instances are created by ``inspect_middlewares``.
    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """
    __visit_name__ = 'middleware_tree'
    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.request = request
        self.resource = resource
        self.response = response
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.
    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes
            of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.
    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """
    __visit_name__ = 'middleware'
    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Pre-render the human-readable description of the `independent` flag.
        if independent:
            self.independent_text = 'Middleware are independent'
        else:
            self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
    """Describes an application.
    Returned by ``inspect_app``.
    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """
    __visit_name__ = 'app'
    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.routes = routes
        self.middleware = middleware
        self.static_routes = static_routes
        self.sinks = sinks
        self.error_handlers = error_handlers
        self.asgi = asgi
    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.
        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.
        Returns:
            str: A string representation of the application.
        """
        return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.
    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """
    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.
        Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
        Args:
            instance (_Traversable): The instance to process.
        Raises:
            RuntimeError: If this visitor has no ``visit_<name>`` method for
                the instance.
        """
        try:
            return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
        except AttributeError as e:
            # NOTE(review): because the visit call happens inside the try block,
            # an AttributeError raised *within* a visit_* method is also
            # converted into this RuntimeError.
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.
    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.
    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """
    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current left margin, in spaces; mutated while walking the tree.
        self.indent = 0
    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent
    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text
    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Box-drawing glyphs: '├──' for all but the last entry, '└──' for the last.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)
    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text
    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text
    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit an ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text
    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text
    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Indent step added per request/resource entry, removed per response entry.
        each = 2
        initial = self.indent
        # Pre-indent when there are more response methods than request/resource
        # ones, so decrementing by `each` per response entry never goes past
        # the initial margin.
        if after > before:
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        self.indent = initial
        return '\n'.join(text)
    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            # The class list is only rendered in verbose mode, nested under the tree.
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text
    def visit_app(self, app: AppInfo) -> str:
        """Visit an AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        # Reset the margin for every app rendered by this visitor.
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
    """Try to get the definition file and line of obj.
    Return default on error.
    """
    try:
        # NOTE(vytas): If Falcon is cythonized, all default
        # responders coming from cythonized modules will
        # appear as built-in functions, and raise a
        # TypeError when trying to locate the source file.
        return '{}:{}'.format(
            inspect.getsourcefile(obj), inspect.findsource(obj)[1]
        )
    except Exception:
        return default
def _get_source_info_and_name(obj):
    """Return a ``(source_info, name)`` pair for ``obj``.

    Falls back to the type of ``obj`` when the object itself has no
    locatable source or no ``__name__``.
    """
    info = _get_source_info(obj, None)
    if info is None:
        # NOTE(caselit): class instances return None. Try the type.
        info = _get_source_info(type(obj))
    obj_name = getattr(obj, '__name__', None)
    if obj_name is None:
        obj_name = getattr(type(obj), '__name__', '[unknown]')
    return info, obj_name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
inspect_static_routes
|
Inspects the static routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[StaticRouteInfo]: A list of static routes that have
been added to the application.
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
            :meth:`~.AppInfo.to_string` on the result to obtain a
            human-friendly representation.
    """
    # Each inspector is independent; gather them all into one AppInfo.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    try:
        # Dispatch on the concrete router class (see register_router).
        inspect_function = _supported_routers[type(router)]
    except KeyError:
        raise TypeError(
            'Unsupported router class {}. Use "register_router" '
            'to register a function that can inspect the router '
            'used by the provided application'.format(type(router))
        ) from None
    return inspect_function(router)
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def decorator(inspector):
        # Refuse double registration; the first inspector wins.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = inspector
        return inspector

    return decorator
# router inspection registry
# Maps a router class to the function that knows how to inspect it.
# Extended at runtime via register_router(); read by inspect_routes().
_supported_routers = {}  # type: Dict[Type, Callable]
# MASKED: inspect_static_routes function (lines 100-115)
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    results = []
    for prefix, sink, _ in app._sinks:
        where, sink_name = _get_source_info_and_name(sink)
        # prefix is a compiled regex; report its pattern string.
        results.append(SinkInfo(prefix.pattern, sink_name, where))
    return results
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    handlers = []
    for exc_type, handler in app._error_handlers.items():
        where, handler_name = _get_source_info_and_name(handler)
        handlers.append(
            ErrorHandlerInfo(
                exc_type.__name__, handler_name, where, _is_internal(handler)
            )
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # Prepare all middleware at once to obtain the execution "tree":
    # stacks of bound methods as the app would actually run them.
    types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)

    type_infos = []
    for stack in types_:
        current = []
        for method in stack:
            _, name = _get_source_info_and_name(method)
            # Bound method -> its owning middleware class.
            cls = type(method.__self__)
            _, cls_name = _get_source_info_and_name(cls)
            current.append(MiddlewareTreeItemInfo(name, cls_name))
        type_infos.append(current)
    middlewareTree = MiddlewareTreeInfo(*type_infos)

    middlewareClasses = []
    names = 'Process request', 'Process resource', 'Process response'
    for m in app._unprepared_middleware:
        # Prepare each middleware in isolation to list its own methods.
        fns = app_helpers.prepare_middleware([m], True, app._ASGI)
        class_source_info, cls_name = _get_source_info_and_name(type(m))
        methods = []
        for method, name in zip(fns, names):
            if method:
                real_func = method[0]
                source_info = _get_source_info(real_func)
                methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
        m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
        middlewareClasses.append(m_info)

    return MiddlewareInfo(
        middlewareTree, middlewareClasses, app._independent_middleware
    )
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """

    def _traverse(roots, parent):
        # Depth-first walk of the routing tree, accumulating into ``routes``.
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Unwrap partials so the source lookup sees the
                        # real responder function.
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func
                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)
                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)
                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)
            if root.children:
                _traverse(root.children, path)

    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an
            empty string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # NOTE(CaselIT): internal falcon names do not start with on and do
        # not have suffix.
        is_responder = function_name.startswith('on')
        # 'on_<verb>[_<suffix>]' -> keep everything after the verb.
        self.suffix = '_'.join(function_name.split('_')[2:]) if is_responder else ''
class RouteInfo(_Traversable):
    """Describes a route.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.path, self.class_name = path, class_name
        self.source_info, self.methods = source_info, methods
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.prefix, self.name, self.source_info = prefix, name, source_info
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.function_name, self.source_info = function_name, source_info
        self.internal = False  # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the
            middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.name, self.source_info, self.methods = name, source_info, methods
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrows used when rendering each middleware phase as text.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.name, self.class_name = name, class_name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.request, self.resource, self.response = request, resource, response
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes
            of the app.
        independent (bool): Whether or not the middleware components are
            executed independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Human-readable form of the ``independent`` flag.
        self.independent_text = (
            'Middleware are independent' if independent else 'Middleware are dependent'
        )
class AppInfo(_Traversable):
    """Describes an application.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.routes, self.middleware = routes, middleware
        self.static_routes, self.sinks = static_routes, sinks
        self.error_handlers, self.asgi = error_handlers, asgi

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route
                methods and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at
                the beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        visitor = StringVisitor(verbose, internal, name)
        return visitor.process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.
        """
        visit_method = 'visit_{}'.format(instance.__visit_name__)
        try:
            return getattr(self, visit_method)(instance)
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling
    ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """

    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation level in spaces; mutated while visiting
        # nested structures and restored by the callers that change it.
        self.indent = 0

    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent

    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text

    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Tree-style connectors: every entry but the last uses '├──'.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)

    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text

    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text

    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit a ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text

    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text

    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        # Fall back to the request arrow for unrecognized method names.
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)

    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Each tree level shifts by two spaces.
        each = 2
        initial = self.indent
        if after > before:
            # Pre-shift so the response side still lines up when it is
            # longer than the request/resource side.
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            # Walk back out one level per response entry.
            self.indent -= each
            text.append(self.process(r))
        self.indent = initial
        return '\n'.join(text)

    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            # Temporarily deepen the indent for the class listing.
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text

    def visit_app(self, app: AppInfo) -> str:
        """Visit a AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        # Sections render at a fixed base indentation.
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
    """Return a ``(source_info, name)`` pair for ``obj``.

    Falls back to the type of ``obj`` when the object itself has no
    locatable source or no ``__name__``.
    """
    info = _get_source_info(obj, None)
    if info is None:
        # NOTE(caselit): class instances return None. Try the type.
        info = _get_source_info(type(obj))
    obj_name = getattr(obj, '__name__', None)
    if obj_name is None:
        obj_name = getattr(type(obj), '__name__', '[unknown]')
    return info, obj_name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    # Only the first element of each registered triple (the static
    # route object) is needed here.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
| 100
| 115
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
            :meth:`~.AppInfo.to_string` on the result to obtain a
            human-friendly representation.
    """
    # Each inspector is independent; gather them all into one AppInfo.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    try:
        # Dispatch on the concrete router class (see register_router).
        inspect_function = _supported_routers[type(router)]
    except KeyError:
        raise TypeError(
            'Unsupported router class {}. Use "register_router" '
            'to register a function that can inspect the router '
            'used by the provided application'.format(type(router))
        ) from None
    return inspect_function(router)
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def decorator(inspector):
        # Refuse double registration; the first inspector wins.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = inspector
        return inspector

    return decorator
# router inspection registry
# Maps a router class to the function that knows how to inspect it.
# Extended at runtime via register_router(); read by inspect_routes().
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    # Only the first element of each registered triple (the static
    # route object) is needed here.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    results = []
    for prefix, sink, _ in app._sinks:
        where, sink_name = _get_source_info_and_name(sink)
        # prefix is a compiled regex; report its pattern string.
        results.append(SinkInfo(prefix.pattern, sink_name, where))
    return results
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    handlers = []
    for exc_type, handler in app._error_handlers.items():
        where, handler_name = _get_source_info_and_name(handler)
        handlers.append(
            ErrorHandlerInfo(
                exc_type.__name__, handler_name, where, _is_internal(handler)
            )
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # Prepare all middleware at once to obtain the execution "tree":
    # stacks of bound methods as the app would actually run them.
    types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)

    type_infos = []
    for stack in types_:
        current = []
        for method in stack:
            _, name = _get_source_info_and_name(method)
            # Bound method -> its owning middleware class.
            cls = type(method.__self__)
            _, cls_name = _get_source_info_and_name(cls)
            current.append(MiddlewareTreeItemInfo(name, cls_name))
        type_infos.append(current)
    middlewareTree = MiddlewareTreeInfo(*type_infos)

    middlewareClasses = []
    names = 'Process request', 'Process resource', 'Process response'
    for m in app._unprepared_middleware:
        # Prepare each middleware in isolation to list its own methods.
        fns = app_helpers.prepare_middleware([m], True, app._ASGI)
        class_source_info, cls_name = _get_source_info_and_name(type(m))
        methods = []
        for method, name in zip(fns, names):
            if method:
                real_func = method[0]
                source_info = _get_source_info(real_func)
                methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
        m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
        middlewareClasses.append(m_info)

    return MiddlewareInfo(
        middlewareTree, middlewareClasses, app._independent_middleware
    )
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """

    def _traverse(roots, parent):
        # Depth-first walk of the routing tree, accumulating into ``routes``.
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Unwrap partials so the source lookup sees the
                        # real responder function.
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func
                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)
                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)
                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)
            if root.children:
                _traverse(root.children, path)

    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an
            empty string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # NOTE(CaselIT): internal falcon names do not start with on and do
        # not have suffix.
        is_responder = function_name.startswith('on')
        # 'on_<verb>[_<suffix>]' -> keep everything after the verb.
        self.suffix = '_'.join(function_name.split('_')[2:]) if is_responder else ''
class RouteInfo(_Traversable):
    """Describes a route.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.path, self.class_name = path, class_name
        self.source_info, self.methods = source_info, methods
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The path prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.prefix = prefix
        self.name = name
        self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.function_name = function_name
        self.source_info = source_info
        # Always False; present so these entries can flow through the same
        # _filter_internal() machinery as RouteMethodInfo.
        self.internal = False
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.name = name
        self.source_info = source_info
        self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrow glyph used when rendering each middleware phase; names not in
    # this map fall back to '→' in StringVisitor.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.name = name
        self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.request = request
        self.resource = resource
        self.response = response
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Human-readable rendering of the ``independent`` flag.
        self.independent_text = (
            'Middleware are independent' if independent else 'Middleware are dependent'
        )
class AppInfo(_Traversable):
    """Describes an application.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.routes = routes
        self.middleware = middleware
        self.static_routes = static_routes
        self.sinks = sinks
        self.error_handlers = error_handlers
        self.asgi = asgi

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of the application.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.

        Args:
            instance (_Traversable): The instance to process.

        Raises:
            RuntimeError: If this visitor defines no ``visit_*`` method for
                the instance's ``__visit_name__``.
        """
        try:
            # Look up the handler separately from invoking it, so an
            # AttributeError raised *inside* a visit method propagates to
            # the caller instead of being misreported as "unsupported type".
            visit = getattr(self, 'visit_{}'.format(instance.__visit_name__))
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
        return visit(instance)
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """

    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation width in spaces; mutated while visiting
        # nested structures and read back through the ``tab`` property.
        self.indent = 0

    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent

    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text

    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Box-drawing "tee" for every entry but the last, which gets the
        # closing "elbow".
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)

    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text

    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text

    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit an ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text

    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text

    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)

    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Render the middleware as a funnel: each request/resource entry
        # indents by ``each`` more spaces, the responder sits at the deepest
        # level, and each response entry dedents back out. When there are
        # more response entries than request+resource ones, start deeper so
        # the indent never goes below the initial level.
        each = 2
        initial = self.indent
        if after > before:
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        # Keep a separator before the responder line, even when there were
        # no request/resource entries at all.
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        # Restore the caller's indentation level.
        self.indent = initial
        return '\n'.join(text)

    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text

    def visit_app(self, app: AppInfo) -> str:
        """Visit an AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
    """Return a ``(source_info, name)`` pair for *obj*.

    Falls back to the object's type when the object itself yields nothing.
    """
    location = _get_source_info(obj, None)
    if location is None:
        # NOTE(caselit): class instances return None. Try the type
        location = _get_source_info(type(obj))
    label = getattr(obj, '__name__', None)
    if label is None:
        label = getattr(type(obj), '__name__', '[unknown]')
    return location, label
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
inspect_sinks
|
Inspects the sinks of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[SinkInfo]: A list of sinks used by the application.
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
        :meth:`~.AppInfo.to_string` on the result to obtain a
        human-friendly representation.
    """
    # Gather each aspect of the app and aggregate into a single AppInfo.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    # Dispatch to the inspection function registered for this router type.
    handler = _supported_routers.get(type(router))
    if handler is None:
        raise TypeError(
            'Unsupported router class {}. Use "register_router" '
            'to register a function that can inspect the router '
            'used by the provided application'.format(type(router))
        )
    return handler(router)
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.
    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def _decorator(fn):
        # Refuse duplicates so a second inspector cannot silently
        # replace an already-registered one.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = fn
        return fn

    return _decorator
# Router inspection registry: maps a router class to the function that can
# inspect it. Populated via the ``register_router`` decorator (the default
# CompiledRouter inspector is registered by this module).
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
        been added to the application.
    """
    # Each registered entry is a 3-tuple whose first element is the
    # static-route object; the remaining elements are not needed here.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    sinks = []
    # Mirror inspect_static_routes: each registered entry is a 3-tuple
    # whose trailing element is not needed here.
    for prefix, sink, _ in app._sinks:
        source_info, name = _get_source_info_and_name(sink)
        # The prefix is stored as a compiled regex; report its raw pattern.
        info = SinkInfo(prefix.pattern, name, source_info)
        sinks.append(info)
    return sinks
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
        application.
    """
    handlers = []
    for exc, fn in app._error_handlers.items():
        source_info, name = _get_source_info_and_name(fn)
        handlers.append(
            ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # Prepare all middleware together, obtaining the stacks of bound
    # methods in the order the app would execute them.
    types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)

    type_infos = []
    for stack in types_:
        current = []
        for method in stack:
            _, name = _get_source_info_and_name(method)
            # Recover the class each bound method belongs to for display.
            cls = type(method.__self__)
            _, cls_name = _get_source_info_and_name(cls)
            current.append(MiddlewareTreeItemInfo(name, cls_name))
        type_infos.append(current)
    middlewareTree = MiddlewareTreeInfo(*type_infos)

    middlewareClasses = []
    names = 'Process request', 'Process resource', 'Process response'
    for m in app._unprepared_middleware:
        # Prepare each component individually to discover which of the
        # middleware methods it actually implements.
        fns = app_helpers.prepare_middleware([m], True, app._ASGI)
        class_source_info, cls_name = _get_source_info_and_name(type(m))
        methods = []
        for method, name in zip(fns, names):
            if method:
                real_func = method[0]
                source_info = _get_source_info(real_func)
                methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
        m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
        middlewareClasses.append(m_info)

    return MiddlewareInfo(
        middlewareTree, middlewareClasses, app._independent_middleware
    )
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """

    def _traverse(roots, parent):
        # Depth-first walk of the router tree, building the URI path one
        # segment at a time and appending to the enclosing ``routes`` list.
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Responders may be wrapped in functools.partial;
                        # unwrap to reach the real function for inspection.
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func
                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)
                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)
                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)
            if root.children:
                _traverse(root.children, path)

    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
"""Describes a responder method.
Args:
method (str): The HTTP method of this responder.
source_info (str): The source path of this function.
function_name (str): Name of the function.
internal (bool): Whether or not this was a default responder added
by the framework.
Attributes:
suffix (str): The suffix of this route function. This is set to an empty
string when the function has no suffix.
"""
__visit_name__ = 'route_method'
def __init__(
self, method: str, source_info: str, function_name: str, internal: bool
):
self.method = method
self.source_info = source_info
self.function_name = function_name
self.internal = internal
# NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
if function_name.startswith('on'):
self.suffix = '_'.join(function_name.split('_')[2:])
else:
self.suffix = ''
class RouteInfo(_Traversable):
"""Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (List[RouteMethodInfo]): List of methods defined in the route.
"""
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: List[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
class StaticRouteInfo(_Traversable):
"""Describes a static route.
Args:
path (str): The prefix of the static route.
directory (str): The directory for the static route.
fallback_filename (str or None): Fallback filename to serve.
"""
__visit_name__ = 'static_route'
def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
self.prefix = prefix
self.directory = directory
self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
"""Describes a sink.
Args:
prefix (str): The prefix of the sink.
name (str): The name of the sink function or class.
source_info (str): The source path where this sink was defined.
"""
__visit_name__ = 'sink'
def __init__(self, prefix: str, name: str, source_info: str):
self.prefix = prefix
self.name = name
self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
"""Desribes an error handler.
Args:
error (name): The name of the error type.
name (str): The name of the handler.
source_info (str): The source path where this error handler was defined.
internal (bool): Whether or not this is a default error handler added by
the framework.
"""
__visit_name__ = 'error_handler'
def __init__(self, error: str, name: str, source_info: str, internal: bool):
self.error = error
self.name = name
self.source_info = source_info
self.internal = internal
class MiddlewareMethodInfo(_Traversable):
"""Describes a middleware method.
Args:
function_name (str): Name of the method.
source_info (str): The source path of the method.
"""
__visit_name__ = 'middleware_method'
def __init__(self, function_name: str, source_info: str):
self.function_name = function_name
self.source_info = source_info
self.internal = False # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
"""Describes a middleware class.
Args:
name (str): The name of the middleware class.
source_info (str): The source path where the middleware was defined.
methods (List[MiddlewareMethodInfo]): List of method defined by the middleware class.
"""
__visit_name__ = 'middleware_class'
def __init__(
self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
):
self.name = name
self.source_info = source_info
self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
"""Describes a middleware tree entry.
Args:
name (str): The name of the method.
class_name (str): The class name of the method.
"""
__visit_name__ = 'middleware_tree_item'
_symbols = {
'process_request': '→',
'process_resource': '↣',
'process_response': '↢',
}
def __init__(self, name: str, class_name: str):
self.name = name
self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
"""Describes the middleware methods used by the app.
Args:
request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
"""
__visit_name__ = 'middleware_tree'
def __init__(
self,
request: List[MiddlewareTreeItemInfo],
resource: List[MiddlewareTreeItemInfo],
response: List[MiddlewareTreeItemInfo],
):
self.request = request
self.resource = resource
self.response = response
class MiddlewareInfo(_Traversable):
"""Describes the middleware of the app.
Args:
middlewareTree (MiddlewareTreeInfo): The middleware tree of the app.
middlewareClasses (List[MiddlewareClassInfo]): The middleware classes of the app.
independent (bool): Whether or not the middleware components are executed
independently.
Attributes:
independent_text (str): Text created from the `independent` arg.
"""
__visit_name__ = 'middleware'
def __init__(
self,
middleware_tree: MiddlewareTreeInfo,
middleware_classes: List[MiddlewareClassInfo],
independent: bool,
):
self.middleware_tree = middleware_tree
self.middleware_classes = middleware_classes
self.independent = independent
if independent:
self.independent_text = 'Middleware are independent'
else:
self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
"""Describes an application.
Args:
routes (List[RouteInfo]): The routes of the application.
middleware (MiddlewareInfo): The middleware information in the application.
static_routes (List[StaticRouteInfo]): The static routes of this application.
sinks (List[SinkInfo]): The sinks of this application.
error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
asgi (bool): Whether or not this is an ASGI application.
"""
__visit_name__ = 'app'
def __init__(
self,
routes: List[RouteInfo],
middleware: MiddlewareInfo,
static_routes: List[StaticRouteInfo],
sinks: List[SinkInfo],
error_handlers: List[ErrorHandlerInfo],
asgi: bool,
):
self.routes = routes
self.middleware = middleware
self.static_routes = static_routes
self.sinks = sinks
self.error_handlers = error_handlers
self.asgi = asgi
def to_string(self, verbose=False, internal=False, name='') -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal falcon route methods
and error handlers. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Returns:
str: A string representation of the application.
"""
return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
"""Base visitor class that implements the `process` method.
Subclasses must implement ``visit_<name>`` methods for each supported class.
"""
def process(self, instance: _Traversable):
"""Process the instance, by calling the appropriate visit method.
Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
Args:
instance (_Traversable): The instance to process.
"""
try:
return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
except AttributeError as e:
raise RuntimeError(
'This visitor does not support {}'.format(type(instance))
) from e
class StringVisitor(InspectVisitor):
"""Visitor that returns a string representation of the info class.
This is used automatically by calling ``to_string()`` on the info class.
It can also be used directly by calling ``StringVisitor.process(info_instance)``.
Args:
verbose (bool, optional): Adds more information. Defaults to ``False``.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
"""
def __init__(self, verbose=False, internal=False, name=''):
self.verbose = verbose
self.internal = internal
self.name = name
self.indent = 0
@property
def tab(self):
"""Get the current tabulation."""
return ' ' * self.indent
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
"""Visit a RouteMethodInfo instance. Usually called by `process`."""
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
def _methods_to_string(self, methods: List):
"""Return a string from the list of methods."""
tab = self.tab + ' ' * 3
methods = _filter_internal(methods, self.internal)
if not methods:
return ''
text_list = [self.process(m) for m in methods]
method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
return '\n'.join(method_text)
def visit_route(self, route: RouteInfo) -> str:
"""Visit a RouteInfo instance. Usually called by `process`."""
text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
"""Visit a StaticRouteInfo instance. Usually called by `process`."""
text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
def visit_sink(self, sink: SinkInfo) -> str:
"""Visit a SinkInfo instance. Usually called by `process`."""
text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
"""Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
    """Render a MiddlewareTreeItemInfo instance as text. Usually called by `process`."""
    # Unknown method names fall back to a plain arrow.
    arrow = mti._symbols.get(mti.name, '→')
    return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, arrow, mti)
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
    """Visit a MiddlewareTreeInfo instance. Usually called by `process`.

    Renders the request/resource/response middleware methods as an
    indented tree that mirrors the order in which they execute.
    """
    before = len(m_tree.request) + len(m_tree.resource)
    after = len(m_tree.response)
    if before + after == 0:
        # No middleware methods at all: nothing to render.
        return ''
    # Each nesting level adds two spaces of indentation.
    each = 2
    initial = self.indent
    if after > before:
        # More response methods than request+resource ones: start deeper so
        # the de-indenting response entries never pass the left margin.
        self.indent += each * (after - before)
    text = []
    for r in m_tree.request:
        text.append(self.process(r))
        self.indent += each
    if text:
        text.append('')
    for r in m_tree.resource:
        text.append(self.process(r))
        self.indent += each
    if m_tree.resource or not text:
        text.append('')
    self.indent += each
    text.append('{}├── Process route responder'.format(self.tab))
    self.indent -= each
    if m_tree.response:
        text.append('')
    for r in m_tree.response:
        self.indent -= each
        text.append(self.process(r))
    # Restore the caller's indentation before returning.
    self.indent = initial
    return '\n'.join(text)
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
    """Render a MiddlewareInfo instance as text. Usually called by `process`."""
    text = self.process(middleware.middleware_tree)
    if not self.verbose:
        return text
    # Verbose mode also lists the middleware classes, further indented.
    self.indent += 4
    classes_text = '\n'.join(
        self.process(info) for info in middleware.middleware_classes
    )
    self.indent -= 4
    if classes_text:
        text += '\n{}- Middlewares classes:\n{}'.format(self.tab, classes_text)
    return text
def visit_app(self, app: AppInfo) -> str:
    """Render an AppInfo instance as text. Usually called by `process`."""
    self.indent = 4
    protocol = 'ASGI' if app.asgi else 'WSGI'
    sections = ['{} ({})'.format(self.name or 'Falcon App', protocol)]
    if app.routes:
        body = '\n'.join(self.process(r) for r in app.routes)
        sections.append('• Routes:\n{}'.format(body))
    middleware_text = self.process(app.middleware)
    if middleware_text:
        sections.append(
            '• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        )
    if app.static_routes:
        body = '\n'.join(self.process(sr) for sr in app.static_routes)
        sections.append('• Static routes:\n{}'.format(body))
    if app.sinks:
        body = '\n'.join(self.process(s) for s in app.sinks)
        sections.append('• Sinks:\n{}'.format(body))
    handlers = _filter_internal(app.error_handlers, self.internal)
    if handlers:
        body = '\n'.join(self.process(e) for e in handlers)
        sections.append('• Error handlers:\n{}'.format(body))
    return '\n'.join(sections)
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
    """Return a ``(source_info, name)`` pair for obj, falling back to its type."""
    source_info = _get_source_info(obj, None)
    if source_info is None:
        # NOTE(caselit): class instances return None. Try the type
        source_info = _get_source_info(type(obj))
    name = getattr(obj, '__name__', None)
    if name is None:
        name = getattr(type(obj), '__name__', '[unknown]')
    return source_info, name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    found = []
    for compiled_prefix, responder, _ in app._sinks:
        source_info, name = _get_source_info_and_name(responder)
        found.append(SinkInfo(compiled_prefix.pattern, name, source_info))
    return found
| 118
| 133
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
            :meth:`~.AppInfo.to_string` on the result to obtain a
            human-friendly representation.
    """
    # Each inspection is an independent read-only pass over the app.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.

    Raises:
        TypeError: If no inspector is registered for the app's router class.
    """
    router = app._router
    handler = _supported_routers.get(type(router))
    if handler is None:
        raise TypeError(
            'Unsupported router class {}. Use "register_router" '
            'to register a function that can inspect the router '
            'used by the provided application'.format(type(router))
        )
    return handler(router)
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def wraps(fn):
        if router_class not in _supported_routers:
            _supported_routers[router_class] = fn
            return fn
        raise ValueError(
            'Another function is already registered'
            ' for the router {}'.format(router_class)
        )

    return wraps
# router inspection registry
# Maps a router class to the function that knows how to inspect it;
# populated via :func:`register_router`.
_supported_routers = {} # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    found = []
    for compiled_prefix, responder, _ in app._sinks:
        source_info, name = _get_source_info_and_name(responder)
        found.append(SinkInfo(compiled_prefix.pattern, name, source_info))
    return found
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    handlers = []
    for exc_type, handler in app._error_handlers.items():
        source_info, name = _get_source_info_and_name(handler)
        handlers.append(
            ErrorHandlerInfo(
                exc_type.__name__, name, source_info, _is_internal(handler)
            )
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # Build the execution tree from the fully prepared middleware stacks.
    prepared = app_helpers.prepare_middleware(
        app._unprepared_middleware, True, app._ASGI
    )
    tree_levels = []
    for stack in prepared:
        level = []
        for method in stack:
            _, method_name = _get_source_info_and_name(method)
            _, class_name = _get_source_info_and_name(type(method.__self__))
            level.append(MiddlewareTreeItemInfo(method_name, class_name))
        tree_levels.append(level)
    tree = MiddlewareTreeInfo(*tree_levels)

    # Describe each middleware component individually.
    class_infos = []
    labels = 'Process request', 'Process resource', 'Process response'
    for component in app._unprepared_middleware:
        stacks = app_helpers.prepare_middleware([component], True, app._ASGI)
        class_source_info, class_name = _get_source_info_and_name(type(component))
        methods = []
        for stack, _label in zip(stacks, labels):
            if stack:
                real_func = stack[0]
                methods.append(
                    MiddlewareMethodInfo(
                        real_func.__name__, _get_source_info(real_func)
                    )
                )
        class_infos.append(
            MiddlewareClassInfo(class_name, class_source_info, methods)
        )

    return MiddlewareInfo(tree, class_infos, app._independent_middleware)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """
    routes = []  # type: List[RouteInfo]

    def _walk(nodes, prefix):
        # Depth-first traversal of the router's node tree.
        for node in nodes:
            path = prefix + '/' + node.raw_segment
            if node.resource is not None:
                methods = []
                for http_method, responder in (node.method_map or {}).items():
                    # Unwrap partials so the real responder is reported.
                    real_func = (
                        responder.func if isinstance(responder, partial) else responder
                    )
                    methods.append(
                        RouteMethodInfo(
                            http_method,
                            _get_source_info(real_func),
                            real_func.__name__,
                            _is_internal(real_func),
                        )
                    )
                source_info, class_name = _get_source_info_and_name(node.resource)
                routes.append(RouteInfo(path, class_name, source_info, methods))
            if node.children:
                _walk(node.children, path)

    _walk(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    # Dispatch key used by InspectVisitor.process to select visit_<name>.
    __visit_name__ = 'N/A'

    def to_string(self, verbose=False, internal=False) -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.

        Returns:
            str: string representation of this class.
        """
        return StringVisitor(verbose, internal).process(self)

    def __repr__(self):
        # Delegate to the default string rendering for debuggability.
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an
            empty string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # NOTE(CaselIT): internal falcon names do not start with on and do
        # not have suffix, e.g. 'on_get_collection' -> 'collection',
        # 'on_get' -> ''.
        self.suffix = (
            '_'.join(function_name.split('_')[2:])
            if function_name.startswith('on')
            else ''
        )
class RouteInfo(_Traversable):
    """Describes a single route of an application.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.path = path
        self.methods = methods
        self.class_name = class_name
        self.source_info = source_info
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a single sink of an application.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.name = name
        self.prefix = prefix
        self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a single method of a middleware class.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.function_name = function_name
        self.source_info = source_info
        # Added for compatibility with RouteMethodInfo; middleware methods
        # are never framework defaults.
        self.internal = False
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware component class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the
            middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.name = name
        self.methods = methods
        self.source_info = source_info
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a single entry in the middleware execution tree.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrows used by the string renderer for each well-known method name.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.name = name
        self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app, grouped by phase.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.request = request
        self.response = response
        self.resource = resource
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Human-readable summary of the middleware execution mode.
        if independent:
            self.independent_text = 'Middleware are independent'
        else:
            self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
    """Describes an application as a whole.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.asgi = asgi
        self.routes = routes
        self.sinks = sinks
        self.middleware = middleware
        self.static_routes = static_routes
        self.error_handlers = error_handlers

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.
        """
        try:
            visit = getattr(self, 'visit_{}'.format(instance.__visit_name__))
            return visit(instance)
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """

    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation width in spaces; mutated while visiting.
        self.indent = 0

    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent

    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Render a RouteMethodInfo instance. Usually called by `process`."""
        rendered = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            rendered += ' ({0.source_info})'.format(route_method)
        return rendered

    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        lead = self.tab + ' ' * 3
        visible = _filter_internal(methods, self.internal)
        if not visible:
            return ''
        rendered = [self.process(m) for m in visible]
        # All entries but the last get a tee connector; the last closes the
        # tree branch.
        lines = ['{}├── {}'.format(lead, m) for m in rendered[:-1]]
        lines += ['{}└── {}'.format(lead, m) for m in rendered[-1:]]
        return '\n'.join(lines)

    def visit_route(self, route: RouteInfo) -> str:
        """Render a RouteInfo instance. Usually called by `process`."""
        header = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            header += ' ({0.source_info})'.format(route)
        body = self._methods_to_string(route.methods)
        if body:
            return '{}:\n{}'.format(header, body)
        return header

    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Render a StaticRouteInfo instance. Usually called by `process`."""
        line = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if not static_route.fallback_filename:
            return line
        return line + ' [{0.fallback_filename}]'.format(static_route)

    def visit_sink(self, sink: SinkInfo) -> str:
        """Render a SinkInfo instance. Usually called by `process`."""
        line = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        return line + (' ({0.source_info})'.format(sink) if self.verbose else '')

    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Render an ErrorHandlerInfo instance. Usually called by `process`."""
        line = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        return line + (
            ' ({0.source_info})'.format(error_handler) if self.verbose else ''
        )

    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Render a MiddlewareMethodInfo instance. Usually called by `process`."""
        parts = ['{0.function_name}'.format(middleware_method)]
        if self.verbose:
            parts.append('({0.source_info})'.format(middleware_method))
        return ' '.join(parts)

    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Render a MiddlewareClassInfo instance. Usually called by `process`."""
        header = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            header += ' ({0.source_info})'.format(middleware_class)
        body = self._methods_to_string(middleware_class.methods)
        if body:
            return '{}:\n{}'.format(header, body)
        return header

    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Render a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        arrow = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, arrow, mti)

    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Render a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Each nesting level adds two spaces of indentation.
        each = 2
        initial = self.indent
        if after > before:
            # Start deeper so de-indented response entries stay in bounds.
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        self.indent = initial
        return '\n'.join(text)

    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Render a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if not self.verbose:
            return text
        self.indent += 4
        classes_text = '\n'.join(
            self.process(info) for info in middleware.middleware_classes
        )
        self.indent -= 4
        if classes_text:
            text += '\n{}- Middlewares classes:\n{}'.format(self.tab, classes_text)
        return text

    def visit_app(self, app: AppInfo) -> str:
        """Render an AppInfo instance. Usually called by `process`."""
        self.indent = 4
        protocol = 'ASGI' if app.asgi else 'WSGI'
        sections = ['{} ({})'.format(self.name or 'Falcon App', protocol)]
        if app.routes:
            body = '\n'.join(self.process(r) for r in app.routes)
            sections.append('• Routes:\n{}'.format(body))
        middleware_text = self.process(app.middleware)
        if middleware_text:
            sections.append(
                '• Middleware ({}):\n{}'.format(
                    app.middleware.independent_text, middleware_text
                )
            )
        if app.static_routes:
            body = '\n'.join(self.process(sr) for sr in app.static_routes)
            sections.append('• Static routes:\n{}'.format(body))
        if app.sinks:
            body = '\n'.join(self.process(s) for s in app.sinks)
            sections.append('• Sinks:\n{}'.format(body))
        handlers = _filter_internal(app.error_handlers, self.internal)
        if handlers:
            body = '\n'.join(self.process(e) for e in handlers)
            sections.append('• Error handlers:\n{}'.format(body))
        return '\n'.join(sections)
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
    """Return a ``(source_info, name)`` pair for obj, falling back to its type."""
    source_info = _get_source_info(obj, None)
    if source_info is None:
        # NOTE(caselit): class instances return None. Try the type
        source_info = _get_source_info(type(obj))
    name = getattr(obj, '__name__', None)
    if name is None:
        name = getattr(type(obj), '__name__', '[unknown]')
    return source_info, name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
inspect_error_handlers
|
Inspects the error handlers of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[ErrorHandlerInfo]: A list of error handlers used by the
application.
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
            :meth:`~.AppInfo.to_string` on the result to obtain a
            human-friendly representation.
    """
    # Each inspection is an independent read-only pass over the app.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.

    Raises:
        TypeError: If no inspector is registered for the app's router class.
    """
    router = app._router
    handler = _supported_routers.get(type(router))
    if handler is None:
        raise TypeError(
            'Unsupported router class {}. Use "register_router" '
            'to register a function that can inspect the router '
            'used by the provided application'.format(type(router))
        )
    return handler(router)
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def wraps(fn):
        if router_class not in _supported_routers:
            _supported_routers[router_class] = fn
            return fn
        raise ValueError(
            'Another function is already registered'
            ' for the router {}'.format(router_class)
        )

    return wraps
# router inspection registry
# Maps a router class to the function that knows how to inspect it;
# populated via :func:`register_router`.
_supported_routers = {} # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    found = []
    for compiled_prefix, responder, _ in app._sinks:
        source_info, name = _get_source_info_and_name(responder)
        found.append(SinkInfo(compiled_prefix.pattern, name, source_info))
    return found
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    errors = []
    for exc, fn in app._error_handlers.items():
        source_info, name = _get_source_info_and_name(fn)
        info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
        errors.append(info)
    return errors
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # Build the execution tree from the fully prepared middleware stacks.
    prepared = app_helpers.prepare_middleware(
        app._unprepared_middleware, True, app._ASGI
    )
    tree_levels = []
    for stack in prepared:
        level = []
        for method in stack:
            _, method_name = _get_source_info_and_name(method)
            _, class_name = _get_source_info_and_name(type(method.__self__))
            level.append(MiddlewareTreeItemInfo(method_name, class_name))
        tree_levels.append(level)
    tree = MiddlewareTreeInfo(*tree_levels)

    # Describe each middleware component individually.
    class_infos = []
    labels = 'Process request', 'Process resource', 'Process response'
    for component in app._unprepared_middleware:
        stacks = app_helpers.prepare_middleware([component], True, app._ASGI)
        class_source_info, class_name = _get_source_info_and_name(type(component))
        methods = []
        for stack, _label in zip(stacks, labels):
            if stack:
                real_func = stack[0]
                methods.append(
                    MiddlewareMethodInfo(
                        real_func.__name__, _get_source_info(real_func)
                    )
                )
        class_infos.append(
            MiddlewareClassInfo(class_name, class_source_info, methods)
        )

    return MiddlewareInfo(tree, class_infos, app._independent_middleware)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """
    def _traverse(roots, parent):
        # Depth-first walk of the router tree, accumulating into ``routes``.
        for root in roots:
            path = parent + '/' + root.raw_segment
            # Only nodes with a resource are addressable routes; purely
            # intermediate path segments have resource set to None.
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Responders may be wrapped in functools.partial
                        # (e.g. suffixed responders); unwrap to inspect the
                        # underlying function's source and name.
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func
                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)
                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)
                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)
            if root.children:
                _traverse(root.children, path)
    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    """Base class for the inspect info classes.

    ``__visit_name__`` is the dispatch key used by :class:`InspectVisitor`
    to select the ``visit_<name>`` method for this object.
    """

    # Subclasses override this with their visitor dispatch key.
    __visit_name__ = 'N/A'

    def to_string(self, verbose=False, internal=False) -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.

        Returns:
            str: string representation of this class.
        """
        return StringVisitor(verbose, internal).process(self)

    def __repr__(self):
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.internal = internal
        self.function_name = function_name
        self.source_info = source_info
        self.method = method
        # NOTE(CaselIT): internal falcon names do not start with 'on' and have
        # no suffix; for 'on_<verb>_<suffix>' everything past the second
        # underscore is the suffix.
        self.suffix = (
            '_'.join(function_name.split('_')[2:])
            if function_name.startswith('on')
            else ''
        )
class RouteInfo(_Traversable):
    """Description of a single route.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self, path: str, class_name: str, source_info: str, methods: List[RouteMethodInfo]
    ):
        self.methods = methods
        self.source_info = source_info
        self.class_name = class_name
        self.path = path
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.source_info = source_info
        self.name = name
        self.prefix = prefix
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.source_info = source_info
        self.function_name = function_name
        # Middleware methods are never framework-internal; this flag exists
        # only so filtering can treat them like RouteMethodInfo.
        self.internal = False
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of method defined by the
            middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]):
        self.methods = methods
        self.source_info = source_info
        self.name = name
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrow glyph used when rendering each middleware phase.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.class_name = class_name
        self.name = name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.response = response
        self.resource = resource
        self.request = request
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes
            of the app.
        independent (bool): Whether or not the middleware components are
            executed independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.independent = independent
        self.middleware_classes = middleware_classes
        self.middleware_tree = middleware_tree
        self.independent_text = (
            'Middleware are independent'
            if independent
            else 'Middleware are dependent'
        )
class AppInfo(_Traversable):
    """Describes an application.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.asgi = asgi
        self.error_handlers = error_handlers
        self.sinks = sinks
        self.static_routes = static_routes
        self.middleware = middleware
        self.routes = routes

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route
                methods and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at
                the beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        visitor = StringVisitor(verbose, internal, name)
        return visitor.process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: '_Traversable'):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.

        Raises:
            RuntimeError: If this visitor defines no ``visit_<name>`` method
                for the instance's ``__visit_name__``.
        """
        # Look up the visitor method outside the call so that an
        # AttributeError raised *inside* a visit method propagates as-is,
        # instead of being masked as "visitor does not support ...".
        try:
            visit = getattr(self, 'visit_{}'.format(instance.__visit_name__))
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
        return visit(instance)
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """

    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation (in spaces); mutated while walking nested
        # structures so children render deeper than their parents.
        self.indent = 0

    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent

    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text

    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Box-drawing connectors: '├──' for all but the last entry, '└──' last.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)

    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text

    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text

    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit a ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text

    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text

    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)

    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Indentation grows by ``each`` for every entry on the way "in"
        # (request/resource) and shrinks on the way "out" (response).
        each = 2
        initial = self.indent
        # If there are more response entries than request/resource ones,
        # start deeper so the responder line ends up at the innermost level.
        if after > before:
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        # Walk back out: each response entry renders one level shallower.
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        self.indent = initial
        return '\n'.join(text)

    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text

    def visit_app(self, app: AppInfo) -> str:
        """Visit a AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    info = _get_source_info(obj, None)
    if info is None:
        # NOTE(caselit): class instances return None; retry with their type.
        info = _get_source_info(type(obj))
    obj_name = getattr(obj, '__name__', None)
    if obj_name is None:
        obj_name = getattr(type(obj), '__name__', '[unknown]')
    return info, obj_name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
# ---- (extraction artifact removed) ----
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
        application.
    """
    # app._error_handlers maps an exception type to its handler callable.
    handlers = []
    for exc_type, handler in app._error_handlers.items():
        src, handler_name = _get_source_info_and_name(handler)
        handlers.append(
            ErrorHandlerInfo(exc_type.__name__, handler_name, src, _is_internal(handler))
        )
    return handlers
# (removed stray extraction markers "136" / "152")
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
        :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
        representation.
    """
    # Gather each facet of the app independently, then bundle them together.
    routes = inspect_routes(app)
    static = inspect_static_routes(app)
    sinks = inspect_sinks(app)
    error_handlers = inspect_error_handlers(app)
    middleware = inspect_middlewares(app)
    # app._ASGI distinguishes falcon.asgi.App from the WSGI falcon.App.
    return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    # Dispatch on the concrete router class via the inspection registry.
    handler = _supported_routers.get(type(router))
    if handler is not None:
        return handler(router)
    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """
    def _decorator(fn):
        # Refuse double registration rather than silently replacing
        # an existing inspector.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = fn
        return fn

    return _decorator
# Router inspection registry: maps a router class to the function that
# knows how to inspect it. Populated via the register_router decorator.
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
        been added to the application.
    """
    # Each entry of app._static_routes is a (StaticRoute, _, _) triple.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
"""Inspects the sinks of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[SinkInfo]: A list of sinks used by the application.
"""
sinks = []
for prefix, sink, _ in app._sinks:
source_info, name = _get_source_info_and_name(sink)
info = SinkInfo(prefix.pattern, name, source_info)
sinks.append(info)
return sinks
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
"""Inspects the error handlers of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[ErrorHandlerInfo]: A list of error handlers used by the
application.
"""
errors = []
for exc, fn in app._error_handlers.items():
source_info, name = _get_source_info_and_name(fn)
info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
errors.append(info)
return errors
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
"""Inspects the middleware components of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
MiddlewareInfo: Information about the app's middleware components.
"""
types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
type_infos = []
for stack in types_:
current = []
for method in stack:
_, name = _get_source_info_and_name(method)
cls = type(method.__self__)
_, cls_name = _get_source_info_and_name(cls)
current.append(MiddlewareTreeItemInfo(name, cls_name))
type_infos.append(current)
middlewareTree = MiddlewareTreeInfo(*type_infos)
middlewareClasses = []
names = 'Process request', 'Process resource', 'Process response'
for m in app._unprepared_middleware:
fns = app_helpers.prepare_middleware([m], True, app._ASGI)
class_source_info, cls_name = _get_source_info_and_name(type(m))
methods = []
for method, name in zip(fns, names):
if method:
real_func = method[0]
source_info = _get_source_info(real_func)
methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
middlewareClasses.append(m_info)
return MiddlewareInfo(
middlewareTree, middlewareClasses, app._independent_middleware
)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
"""Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
"""
def _traverse(roots, parent):
for root in roots:
path = parent + '/' + root.raw_segment
if root.resource is not None:
methods = []
if root.method_map:
for method, func in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(
method, source_info, real_func.__name__, internal
)
methods.append(method_info)
source_info, class_name = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = [] # type: List[RouteInfo]
_traverse(router._roots, '')
return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
"""Describes a responder method.
Args:
method (str): The HTTP method of this responder.
source_info (str): The source path of this function.
function_name (str): Name of the function.
internal (bool): Whether or not this was a default responder added
by the framework.
Attributes:
suffix (str): The suffix of this route function. This is set to an empty
string when the function has no suffix.
"""
__visit_name__ = 'route_method'
def __init__(
self, method: str, source_info: str, function_name: str, internal: bool
):
self.method = method
self.source_info = source_info
self.function_name = function_name
self.internal = internal
# NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
if function_name.startswith('on'):
self.suffix = '_'.join(function_name.split('_')[2:])
else:
self.suffix = ''
class RouteInfo(_Traversable):
"""Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (List[RouteMethodInfo]): List of methods defined in the route.
"""
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: List[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
class StaticRouteInfo(_Traversable):
"""Describes a static route.
Args:
path (str): The prefix of the static route.
directory (str): The directory for the static route.
fallback_filename (str or None): Fallback filename to serve.
"""
__visit_name__ = 'static_route'
def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
self.prefix = prefix
self.directory = directory
self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
"""Describes a sink.
Args:
prefix (str): The prefix of the sink.
name (str): The name of the sink function or class.
source_info (str): The source path where this sink was defined.
"""
__visit_name__ = 'sink'
def __init__(self, prefix: str, name: str, source_info: str):
self.prefix = prefix
self.name = name
self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
"""Desribes an error handler.
Args:
error (name): The name of the error type.
name (str): The name of the handler.
source_info (str): The source path where this error handler was defined.
internal (bool): Whether or not this is a default error handler added by
the framework.
"""
__visit_name__ = 'error_handler'
def __init__(self, error: str, name: str, source_info: str, internal: bool):
self.error = error
self.name = name
self.source_info = source_info
self.internal = internal
class MiddlewareMethodInfo(_Traversable):
"""Describes a middleware method.
Args:
function_name (str): Name of the method.
source_info (str): The source path of the method.
"""
__visit_name__ = 'middleware_method'
def __init__(self, function_name: str, source_info: str):
self.function_name = function_name
self.source_info = source_info
self.internal = False # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
"""Describes a middleware class.
Args:
name (str): The name of the middleware class.
source_info (str): The source path where the middleware was defined.
methods (List[MiddlewareMethodInfo]): List of method defined by the middleware class.
"""
__visit_name__ = 'middleware_class'
def __init__(
self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
):
self.name = name
self.source_info = source_info
self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
"""Describes a middleware tree entry.
Args:
name (str): The name of the method.
class_name (str): The class name of the method.
"""
__visit_name__ = 'middleware_tree_item'
_symbols = {
'process_request': '→',
'process_resource': '↣',
'process_response': '↢',
}
def __init__(self, name: str, class_name: str):
self.name = name
self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
"""Describes the middleware methods used by the app.
Args:
request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
"""
__visit_name__ = 'middleware_tree'
def __init__(
self,
request: List[MiddlewareTreeItemInfo],
resource: List[MiddlewareTreeItemInfo],
response: List[MiddlewareTreeItemInfo],
):
self.request = request
self.resource = resource
self.response = response
class MiddlewareInfo(_Traversable):
"""Describes the middleware of the app.
Args:
middlewareTree (MiddlewareTreeInfo): The middleware tree of the app.
middlewareClasses (List[MiddlewareClassInfo]): The middleware classes of the app.
independent (bool): Whether or not the middleware components are executed
independently.
Attributes:
independent_text (str): Text created from the `independent` arg.
"""
__visit_name__ = 'middleware'
def __init__(
self,
middleware_tree: MiddlewareTreeInfo,
middleware_classes: List[MiddlewareClassInfo],
independent: bool,
):
self.middleware_tree = middleware_tree
self.middleware_classes = middleware_classes
self.independent = independent
if independent:
self.independent_text = 'Middleware are independent'
else:
self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
"""Describes an application.
Args:
routes (List[RouteInfo]): The routes of the application.
middleware (MiddlewareInfo): The middleware information in the application.
static_routes (List[StaticRouteInfo]): The static routes of this application.
sinks (List[SinkInfo]): The sinks of this application.
error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
asgi (bool): Whether or not this is an ASGI application.
"""
__visit_name__ = 'app'
def __init__(
self,
routes: List[RouteInfo],
middleware: MiddlewareInfo,
static_routes: List[StaticRouteInfo],
sinks: List[SinkInfo],
error_handlers: List[ErrorHandlerInfo],
asgi: bool,
):
self.routes = routes
self.middleware = middleware
self.static_routes = static_routes
self.sinks = sinks
self.error_handlers = error_handlers
self.asgi = asgi
def to_string(self, verbose=False, internal=False, name='') -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal falcon route methods
and error handlers. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Returns:
str: A string representation of the application.
"""
return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: _Traversable):
        """Dispatch *instance* to the matching ``visit_<name>`` method.

        The method name is derived from the ``__visit_name__`` attribute of
        the instance being processed.

        Args:
            instance (_Traversable): The instance to process.
        """
        visit_name = 'visit_{}'.format(instance.__visit_name__)
        try:
            # NOTE: the call itself is kept inside the try block on purpose,
            # matching the established behavior of converting any
            # AttributeError into a RuntimeError.
            return getattr(self, visit_name)(instance)
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """

    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation level, in spaces. Mutated in place as the
        # visitor descends into nested structures; visit_app resets it.
        self.indent = 0

    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent

    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text

    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Tree-style connectors: a branch for every entry but the last,
        # which closes the tree with a corner connector.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)

    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text

    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text

    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit an ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text

    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text

    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)

    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        # Unknown method names fall back to the generic right arrow.
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)

    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Extra spaces of indent added per nesting level.
        each = 2
        initial = self.indent
        # If there are more response methods than request/resource methods,
        # start further right so the response side can de-indent one level
        # per entry without going past the initial column.
        if after > before:
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        # Blank separator line before the responder marker, unless one was
        # just added (i.e. nothing emitted yet).
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        # Response methods run in reverse nesting order: de-indent per entry.
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        # Restore the caller's indentation level.
        self.indent = initial
        return '\n'.join(text)

    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            # Per-class breakdown is only rendered in verbose mode.
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text

    def visit_app(self, app: AppInfo) -> str:
        """Visit an AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
    """Try to get the definition file and line of obj.

    Return default on error.
    """
    try:
        return '{}:{}'.format(
            inspect.getsourcefile(obj), inspect.findsource(obj)[1]
        )
    except Exception:
        # NOTE(vytas): If Falcon is cythonized, all default
        # responders coming from cythonized modules will
        # appear as built-in functions, and raise a
        # TypeError when trying to locate the source file.
        return default
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    # NOTE(caselit): class instances yield no source info; retry with the type.
    source_info = _get_source_info(obj, None)
    if source_info is None:
        source_info = _get_source_info(type(obj))
    # Prefer the object's own __name__; otherwise fall back to its type.
    for candidate in (obj, type(obj)):
        name = getattr(candidate, '__name__', None)
        if name is not None:
            break
    else:
        name = '[unknown]'
    return source_info, name
def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
    module = inspect.getmodule(obj)
    # Objects with no resolvable module (e.g. C builtins) are not internal.
    return bool(module) and module.__name__.startswith('falcon.')
def _filter_internal(iterable, return_internal):
    """Filter the internal elements of an iterable."""
    if not return_internal:
        return [item for item in iterable if not item.internal]
    # Internal entries were requested as well: hand back the iterable as-is.
    return iterable
|
inspect_compiled_router
|
Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
            :meth:`~.AppInfo.to_string` on the result to obtain a
            human-friendly representation.
    """
    # Each aspect of the app is gathered by its dedicated inspector.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router
    # Look up the inspector registered for this exact router class.
    handler = _supported_routers.get(type(router))
    if handler is not None:
        return handler(router)
    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def _decorator(fn):
        # Each router class may have at most one registered inspector.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = fn
        return fn

    return _decorator


# router inspection registry
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    # Each entry of _static_routes is a tuple whose first element is the
    # static route object itself; the remaining elements are not needed here.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """

    def _describe(prefix, sink):
        source_info, name = _get_source_info_and_name(sink)
        # prefix is a compiled regex; expose its original pattern string.
        return SinkInfo(prefix.pattern, name, source_info)

    return [_describe(prefix, sink) for prefix, sink, _ in app._sinks]
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    handlers = []
    for exc, fn in app._error_handlers.items():
        source_info, name = _get_source_info_and_name(fn)
        handlers.append(
            ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    prepared = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)

    # Build the execution-order tree: one list of (class, method) entries per
    # middleware stage (request, resource, response).
    tree_stages = []
    for stack in prepared:
        stage = []
        for method in stack:
            _, method_name = _get_source_info_and_name(method)
            _, class_name = _get_source_info_and_name(type(method.__self__))
            stage.append(MiddlewareTreeItemInfo(method_name, class_name))
        tree_stages.append(stage)
    tree = MiddlewareTreeInfo(*tree_stages)

    # Describe each middleware class on its own, listing only the process_*
    # methods that the class actually implements.
    stage_names = 'Process request', 'Process resource', 'Process response'
    classes = []
    for component in app._unprepared_middleware:
        stacks = app_helpers.prepare_middleware([component], True, app._ASGI)
        class_source_info, class_name = _get_source_info_and_name(type(component))
        methods = []
        for stack, _ in zip(stacks, stage_names):
            if stack:
                real_func = stack[0]
                methods.append(
                    MiddlewareMethodInfo(real_func.__name__, _get_source_info(real_func))
                )
        classes.append(MiddlewareClassInfo(class_name, class_source_info, methods))

    return MiddlewareInfo(tree, classes, app._independent_middleware)
# MASKED: inspect_compiled_router function (lines 197-239)
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    # Overridden by each concrete subclass with the name used for
    # InspectVisitor method dispatch.
    __visit_name__ = 'N/A'

    def to_string(self, verbose=False, internal=False) -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.

        Returns:
            str: string representation of this class.
        """
        visitor = StringVisitor(verbose, internal)
        return visitor.process(self)

    def __repr__(self):
        # Reuse the visitor-based rendering for debugging output.
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        (self.method, self.source_info, self.function_name, self.internal) = (
            method,
            source_info,
            function_name,
            internal,
        )
        # NOTE(CaselIT): internal falcon names do not start with 'on' and
        # carry no suffix. E.g. 'on_get_items' -> 'items', 'on_get' -> ''.
        self.suffix = (
            '_'.join(function_name.split('_')[2:])
            if function_name.startswith('on')
            else ''
        )
class RouteInfo(_Traversable):
    """Describes a route.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        # Plain data holder used by the visitors for rendering.
        (self.path, self.class_name, self.source_info, self.methods) = (
            path,
            class_name,
            source_info,
            methods,
        )
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        # Plain data holder used by the visitors for rendering.
        (self.prefix, self.name, self.source_info) = (prefix, name, source_info)
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        (self.function_name, self.source_info) = (function_name, source_info)
        # Always False; present only for compatibility with RouteMethodInfo
        # so both can pass through _filter_internal.
        self.internal = False
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        # Plain data holder used by the visitors for rendering.
        (self.name, self.source_info, self.methods) = (name, source_info, methods)
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrow glyphs used by the string visitor to mark each middleware stage.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        (self.name, self.class_name) = (name, class_name)
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        # Plain data holder used by the visitors for rendering.
        (self.request, self.resource, self.response) = (request, resource, response)
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Human-readable label rendered in the middleware section header.
        if independent:
            self.independent_text = 'Middleware are independent'
        else:
            self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
"""Describes an application.
Args:
routes (List[RouteInfo]): The routes of the application.
middleware (MiddlewareInfo): The middleware information in the application.
static_routes (List[StaticRouteInfo]): The static routes of this application.
sinks (List[SinkInfo]): The sinks of this application.
error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
asgi (bool): Whether or not this is an ASGI application.
"""
__visit_name__ = 'app'
def __init__(
self,
routes: List[RouteInfo],
middleware: MiddlewareInfo,
static_routes: List[StaticRouteInfo],
sinks: List[SinkInfo],
error_handlers: List[ErrorHandlerInfo],
asgi: bool,
):
self.routes = routes
self.middleware = middleware
self.static_routes = static_routes
self.sinks = sinks
self.error_handlers = error_handlers
self.asgi = asgi
def to_string(self, verbose=False, internal=False, name='') -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal falcon route methods
and error handlers. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Returns:
str: A string representation of the application.
"""
return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
"""Base visitor class that implements the `process` method.
Subclasses must implement ``visit_<name>`` methods for each supported class.
"""
def process(self, instance: _Traversable):
"""Process the instance, by calling the appropriate visit method.
Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
Args:
instance (_Traversable): The instance to process.
"""
try:
return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
except AttributeError as e:
raise RuntimeError(
'This visitor does not support {}'.format(type(instance))
) from e
class StringVisitor(InspectVisitor):
"""Visitor that returns a string representation of the info class.
This is used automatically by calling ``to_string()`` on the info class.
It can also be used directly by calling ``StringVisitor.process(info_instance)``.
Args:
verbose (bool, optional): Adds more information. Defaults to ``False``.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
"""
def __init__(self, verbose=False, internal=False, name=''):
self.verbose = verbose
self.internal = internal
self.name = name
self.indent = 0
@property
def tab(self):
"""Get the current tabulation."""
return ' ' * self.indent
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
"""Visit a RouteMethodInfo instance. Usually called by `process`."""
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
def _methods_to_string(self, methods: List):
"""Return a string from the list of methods."""
tab = self.tab + ' ' * 3
methods = _filter_internal(methods, self.internal)
if not methods:
return ''
text_list = [self.process(m) for m in methods]
method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
return '\n'.join(method_text)
def visit_route(self, route: RouteInfo) -> str:
"""Visit a RouteInfo instance. Usually called by `process`."""
text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
"""Visit a StaticRouteInfo instance. Usually called by `process`."""
text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
def visit_sink(self, sink: SinkInfo) -> str:
"""Visit a SinkInfo instance. Usually called by `process`."""
text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
"""Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
"""Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
"""Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
before = len(m_tree.request) + len(m_tree.resource)
after = len(m_tree.response)
if before + after == 0:
return ''
each = 2
initial = self.indent
if after > before:
self.indent += each * (after - before)
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if m_tree.resource or not text:
text.append('')
self.indent += each
text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
"""Visit a MiddlewareInfo instance. Usually called by `process`."""
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
self.indent -= 4
if m_text:
text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
return text
def visit_app(self, app: AppInfo) -> str:
"""Visit a AppInfo instance. Usually called by `process`."""
type_ = 'ASGI' if app.asgi else 'WSGI'
self.indent = 4
text = '{} ({})'.format(self.name or 'Falcon App', type_)
if app.routes:
routes = '\n'.join(self.process(r) for r in app.routes)
text += '\n• Routes:\n{}'.format(routes)
middleware_text = self.process(app.middleware)
if middleware_text:
text += '\n• Middleware ({}):\n{}'.format(
app.middleware.independent_text, middleware_text
)
if app.static_routes:
static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
text += '\n• Static routes:\n{}'.format(static_routes)
if app.sinks:
sinks = '\n'.join(self.process(s) for s in app.sinks)
text += '\n• Sinks:\n{}'.format(sinks)
errors = _filter_internal(app.error_handlers, self.internal)
if errors:
errs = '\n'.join(self.process(e) for e in errors)
text += '\n• Error handlers:\n{}'.format(errs)
return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
"""Attempt to get the definition file and line of obj and its name."""
source_info = _get_source_info(obj, None)
if source_info is None:
# NOTE(caselit): a class instances return None. Try the type
source_info = _get_source_info(type(obj))
name = getattr(obj, '__name__', None)
if name is None:
name = getattr(type(obj), '__name__', '[unknown]')
return source_info, name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """
    routes = []  # type: List[RouteInfo]

    def _walk(nodes, prefix):
        # Depth-first traversal; each node contributes one path segment.
        for node in nodes:
            path = prefix + '/' + node.raw_segment
            if node.resource is not None:
                methods = []
                for verb, responder in (node.method_map or {}).items():
                    # Responders may be wrapped in functools.partial;
                    # unwrap to reach the actual function.
                    real_func = responder.func if isinstance(responder, partial) else responder
                    methods.append(
                        RouteMethodInfo(
                            verb,
                            _get_source_info(real_func),
                            real_func.__name__,
                            _is_internal(real_func),
                        )
                    )
                source_info, class_name = _get_source_info_and_name(node.resource)
                routes.append(RouteInfo(path, class_name, source_info, methods))
            if node.children:
                _walk(node.children, path)

    _walk(router._roots, '')
    return routes
| 197
| 239
|
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
"""Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
"""
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middlewares(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspect the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.

    Raises:
        TypeError: If no inspector is registered for the app's router class.
    """
    router = app._router
    handler = _supported_routers.get(type(router))
    if handler is not None:
        return handler(router)
    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """
    def decorator(inspector):
        # Refuse double registration so two inspectors cannot silently clash.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = inspector
        return inspector
    return decorator
# router inspection registry
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspect the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    # Each entry of app._static_routes is a (route, asgi, fallback) triple;
    # only the route object itself carries the data we report.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspect the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """
    collected = []
    for prefix, sink, _ in app._sinks:
        source, sink_name = _get_source_info_and_name(sink)
        collected.append(SinkInfo(prefix.pattern, sink_name, source))
    return collected
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspect the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    handlers = []
    for exc_type, handler_fn in app._error_handlers.items():
        source, handler_name = _get_source_info_and_name(handler_fn)
        handlers.append(
            ErrorHandlerInfo(
                exc_type.__name__, handler_name, source, _is_internal(handler_fn)
            )
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # First pass: prepare all middleware together to obtain the execution
    # stacks (request / resource / response) in the order the app runs them.
    types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
    type_infos = []
    for stack in types_:
        current = []
        for method in stack:
            _, name = _get_source_info_and_name(method)
            # each prepared entry is a bound method; report its owning class too
            cls = type(method.__self__)
            _, cls_name = _get_source_info_and_name(cls)
            current.append(MiddlewareTreeItemInfo(name, cls_name))
        type_infos.append(current)
    middlewareTree = MiddlewareTreeInfo(*type_infos)
    middlewareClasses = []
    names = 'Process request', 'Process resource', 'Process response'
    # Second pass: prepare each middleware on its own so its methods can be
    # attributed to the class that defines them.
    for m in app._unprepared_middleware:
        fns = app_helpers.prepare_middleware([m], True, app._ASGI)
        class_source_info, cls_name = _get_source_info_and_name(type(m))
        methods = []
        # fns is one tuple per stage, in the same order as `names`; a stage is
        # empty when the middleware does not implement it
        for method, name in zip(fns, names):
            if method:
                real_func = method[0]
                source_info = _get_source_info(real_func)
                methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
        m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
        middlewareClasses.append(m_info)
    return MiddlewareInfo(
        middlewareTree, middlewareClasses, app._independent_middleware
    )
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """
    def _traverse(roots, parent):
        # Depth-first walk of the routing tree. ``parent`` is the URI prefix
        # accumulated so far; results are appended to the enclosing ``routes``.
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # responders may be wrapped in functools.partial;
                        # unwrap to inspect the real function
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func
                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)
                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)
                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)
            if root.children:
                _traverse(root.children, path)
    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    # Base class for all info objects; ``__visit_name__`` selects the
    # ``visit_<name>`` method used by InspectVisitor subclasses.
    __visit_name__ = 'N/A'

    def to_string(self, verbose=False, internal=False) -> str:
        """Render this info object as human-readable text.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.

        Returns:
            str: string representation of this class.
        """
        visitor = StringVisitor(verbose, internal)
        return visitor.process(self)

    def __repr__(self):
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Information about a single responder method of a route.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
        if not function_name.startswith('on'):
            self.suffix = ''
        else:
            # user responders are named on_<verb>[_<suffix>]; keep what follows
            # the first two underscore-separated parts
            self.suffix = '_'.join(function_name.split('_')[2:])
class RouteInfo(_Traversable):
    """Information about a single route of an application.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.methods = methods
        self.source_info = source_info
        self.class_name = class_name
        self.path = path
class StaticRouteInfo(_Traversable):
    """Information about a static route.

    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.fallback_filename = fallback_filename
        self.directory = directory
        self.prefix = prefix
class SinkInfo(_Traversable):
    """Information about a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.source_info = source_info
        self.name = name
        self.prefix = prefix
class ErrorHandlerInfo(_Traversable):
    """Information about an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.internal = internal
        self.source_info = source_info
        self.name = name
        self.error = error
class MiddlewareMethodInfo(_Traversable):
    """Information about a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.source_info = source_info
        self.function_name = function_name
        # Middleware methods are never framework-added; keep a constant flag so
        # _filter_internal can treat them uniformly with RouteMethodInfo.
        self.internal = False
class MiddlewareClassInfo(_Traversable):
    """Information about a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of method defined by the middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.methods = methods
        self.source_info = source_info
        self.name = name
class MiddlewareTreeItemInfo(_Traversable):
    """Information about one entry of the middleware tree.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # arrows used by StringVisitor to render each middleware stage
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.class_name = class_name
        self.name = name
class MiddlewareTreeInfo(_Traversable):
    """Information about the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.response = response
        self.resource = resource
        self.request = request
class MiddlewareInfo(_Traversable):
    """Information about the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        self.independent_text = (
            'Middleware are independent' if independent else 'Middleware are dependent'
        )
class AppInfo(_Traversable):
    """Information describing a whole application.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.asgi = asgi
        self.error_handlers = error_handlers
        self.sinks = sinks
        self.static_routes = static_routes
        self.middleware = middleware
        self.routes = routes

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        visitor = StringVisitor(verbose, internal, name)
        return visitor.process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.
        """
        visit_name = 'visit_{}'.format(instance.__visit_name__)
        try:
            # NOTE: the call stays inside the try block on purpose, so an
            # AttributeError raised by the visit method itself is also
            # converted into a RuntimeError.
            return getattr(self, visit_name)(instance)
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.

    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.

    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """
    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # current indentation width in spaces; mutated while visiting nested items
        self.indent = 0
    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent
    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text
    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # tree-style rendering: '├──' for every entry except the last, '└──' for the last
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)
    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text
    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text
    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit a ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text
    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text
    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        # The tree is rendered as a funnel: indentation grows while descending
        # through request/resource stages and shrinks back through responses.
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        each = 2
        initial = self.indent
        # start deeper when there are more response entries than request ones,
        # so un-indenting through the responses never goes negative
        if after > before:
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        # restore the indentation for the caller
        self.indent = initial
        return '\n'.join(text)
    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text
    def visit_app(self, app: AppInfo) -> str:
        """Visit a AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
    """Try to get the definition file and line of obj.

    Return default on error.
    """
    try:
        src_file = inspect.getsourcefile(obj)
        _, line_number = inspect.findsource(obj)
    except Exception:
        # NOTE(vytas): If Falcon is cythonized, all default
        #   responders coming from cythonized modules will
        #   appear as built-in functions, and raise a
        #   TypeError when trying to locate the source file.
        return default
    return '{}:{}'.format(src_file, line_number)
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    info = _get_source_info(obj, None)
    if info is None:
        # NOTE(caselit): class instances have no source of their own; fall
        #   back to the source of their type
        info = _get_source_info(type(obj))
    name = getattr(obj, '__name__', None)
    if name is None:
        name = getattr(type(obj), '__name__', '[unknown]')
    return info, name
def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
    module = inspect.getmodule(obj)
    # getmodule may return None (e.g. for builtins-less objects)
    return bool(module) and module.__name__.startswith('falcon.')
def _filter_internal(iterable, return_internal):
    """Return the iterable unchanged, or a list without the internal elements."""
    if return_internal:
        return iterable
    return [element for element in iterable if not element.internal]
|
pad_nd_image_and_seg
|
Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger than
new_shape the shape stays the same for the dimensions this applies)
:param data:
:param seg:
:param new_shape: if none then only must_be_divisible_by is applied
:param must_be_divisible_by: UNet like architectures sometimes require the input to be divisible by some number. This
will modify new_shape if new_shape is not divisible by this (by increasing it accordingly).
must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same
length as new_shape
:param pad_mode_data: see np.pad
:param np_pad_kwargs_data:see np.pad
:param pad_mode_seg:see np.pad
:param np_pad_kwargs_seg:see np.pad
:return:
|
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
def center_crop(data, crop_size, seg=None):
    # Convenience wrapper: centered crop with no margin.
    return crop(data, seg=seg, crop_size=crop_size, margins=0, crop_type='center')
def get_lbs_for_random_crop(crop_size, data_shape, margins):
    """Draw random lower-bound indices for a crop, one per spatial axis.

    :param crop_size:
    :param data_shape: (b,c,x,y(,z)) must be the whole thing!
    :param margins:
    :return:
    """
    lbs = []
    for d in range(len(data_shape) - 2):
        upper = data_shape[d + 2] - crop_size[d] - margins[d]
        if upper > margins[d]:
            lbs.append(np.random.randint(margins[d], upper))
        else:
            # the crop does not fit with the requested margin -> center it
            lbs.append((data_shape[d + 2] - crop_size[d]) // 2)
    return lbs
def get_lbs_for_center_crop(crop_size, data_shape):
    """Compute lower-bound indices of a centered crop, one per spatial axis.

    :param crop_size:
    :param data_shape: (b,c,x,y(,z)) must be the whole thing!
    :return:
    """
    spatial_dims = len(data_shape) - 2
    return [(data_shape[d + 2] - crop_size[d]) // 2 for d in range(spatial_dims)]
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type="center",
         pad_mode='constant', pad_kwargs={'constant_values': 0},
         pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):
    """
    crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is
    determined by crop_type. Margin will be respected only for random_crop and will prevent the crops from being closer
    than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be
    padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with
    margin=0 for the appropriate axes

    :param data: b, c, x, y(, z)
    :param seg:
    :param crop_size:
    :param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).
    Can be negative (data/seg will be padded if needed)
    :param crop_type: random or center
    :return:
    """
    if not isinstance(data, (list, tuple, np.ndarray)):
        raise TypeError("data has to be either a numpy array or a list")
    # data may be a list of per-sample arrays (samples can differ spatially);
    # the nominal batch shape is derived from the first sample
    data_shape = tuple([len(data)] + list(data[0].shape))
    data_dtype = data[0].dtype
    # number of spatial dimensions (2 or 3)
    dim = len(data_shape) - 2
    if seg is not None:
        seg_shape = tuple([len(seg)] + list(seg[0].shape))
        seg_dtype = seg[0].dtype
        if not isinstance(seg, (list, tuple, np.ndarray)):
            raise TypeError("data has to be either a numpy array or a list")
        assert all([i == j for i, j in zip(seg_shape[2:], data_shape[2:])]), "data and seg must have the same spatial " \
                                                                             "dimensions. Data: %s, seg: %s" % \
                                                                             (str(data_shape), str(seg_shape))
    # broadcast scalar crop_size / margins to one entry per spatial dimension
    if type(crop_size) not in (tuple, list, np.ndarray):
        crop_size = [crop_size] * dim
    else:
        assert len(crop_size) == len(
            data_shape) - 2, "If you provide a list/tuple as center crop make sure it has the same dimension as your " \
                             "data (2d/3d)"
    if not isinstance(margins, (np.ndarray, tuple, list)):
        margins = [margins] * dim
    data_return = np.zeros([data_shape[0], data_shape[1]] + list(crop_size), dtype=data_dtype)
    if seg is not None:
        seg_return = np.zeros([seg_shape[0], seg_shape[1]] + list(crop_size), dtype=seg_dtype)
    else:
        seg_return = None
    for b in range(data_shape[0]):
        # per-sample shape (samples stored in a list may differ spatially)
        data_shape_here = [data_shape[0]] + list(data[b].shape)
        if seg is not None:
            seg_shape_here = [seg_shape[0]] + list(seg[b].shape)
        if crop_type == "center":
            lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
        elif crop_type == "random":
            lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
        else:
            raise NotImplementedError("crop_type must be either center or random")
        # padding required per axis when the crop window extends past the border
        # (lbs < 0 on the lower side, lbs + crop_size > shape on the upper side)
        need_to_pad = [[0, 0]] + [[abs(min(0, lbs[d])),
                                   abs(min(0, data_shape_here[d + 2] - (lbs[d] + crop_size[d])))]
                                  for d in range(dim)]
        # we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed
        ubs = [min(lbs[d] + crop_size[d], data_shape_here[d+2]) for d in range(dim)]
        lbs = [max(0, lbs[d]) for d in range(dim)]
        slicer_data = [slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
        data_cropped = data[b][tuple(slicer_data)]
        if seg_return is not None:
            slicer_seg = [slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
            seg_cropped = seg[b][tuple(slicer_seg)]
        if any([i > 0 for j in need_to_pad for i in j]):
            data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
            if seg_return is not None:
                seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
        else:
            data_return[b] = data_cropped
            if seg_return is not None:
                seg_return[b] = seg_cropped
    return data_return, seg_return
def random_crop(data, seg=None, crop_size=128, margins=[0, 0, 0]):
    # Convenience wrapper: random placement within the given margins.
    return crop(data, seg=seg, crop_size=crop_size, margins=margins, crop_type='random')
# MASKED: pad_nd_image_and_seg function (lines 146-171)
|
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant',
                         np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
    """
    Pads data and seg to new_shape. new_shape is understood as a minimum shape: axes in which data/seg
    already meet or exceed new_shape are left unchanged.
    :param data:
    :param seg: may be None, in which case only data is padded
    :param new_shape: if None then only must_be_divisible_by is applied
    :param must_be_divisible_by: UNet like architectures sometimes require the input to be divisible by some
    number. This will modify new_shape if new_shape is not divisible by this (by increasing it accordingly).
    must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the
    same length as new_shape
    :param pad_mode_data: see np.pad
    :param np_pad_kwargs_data: see np.pad
    :param pad_mode_seg: see np.pad
    :param np_pad_kwargs_seg: see np.pad
    :return: (padded data, padded seg or None)
    """
    padded_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data,
                               return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
    if seg is None:
        padded_seg = None
    else:
        padded_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg,
                                  return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
    return padded_data, padded_seg
| 146
| 171
|
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
def center_crop(data, crop_size, seg=None):
    # Convenience wrapper: centered crop with no margin.
    return crop(data, seg=seg, crop_size=crop_size, margins=0, crop_type='center')
def get_lbs_for_random_crop(crop_size, data_shape, margins):
    """Draw random lower-bound indices for a crop, one per spatial axis.

    :param crop_size:
    :param data_shape: (b,c,x,y(,z)) must be the whole thing!
    :param margins:
    :return:
    """
    lbs = []
    for d in range(len(data_shape) - 2):
        upper = data_shape[d + 2] - crop_size[d] - margins[d]
        if upper > margins[d]:
            lbs.append(np.random.randint(margins[d], upper))
        else:
            # the crop does not fit with the requested margin -> center it
            lbs.append((data_shape[d + 2] - crop_size[d]) // 2)
    return lbs
def get_lbs_for_center_crop(crop_size, data_shape):
    """Compute lower-bound indices of a centered crop, one per spatial axis.

    :param crop_size:
    :param data_shape: (b,c,x,y(,z)) must be the whole thing!
    :return:
    """
    spatial_dims = len(data_shape) - 2
    return [(data_shape[d + 2] - crop_size[d]) // 2 for d in range(spatial_dims)]
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type="center",
         pad_mode='constant', pad_kwargs={'constant_values': 0},
         pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):
    """
    crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is
    determined by crop_type. Margin will be respected only for random_crop and will prevent the crops from being closer
    than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be
    padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with
    margin=0 for the appropriate axes

    :param data: b, c, x, y(, z)
    :param seg:
    :param crop_size:
    :param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).
    Can be negative (data/seg will be padded if needed)
    :param crop_type: random or center
    :return:
    """
    if not isinstance(data, (list, tuple, np.ndarray)):
        raise TypeError("data has to be either a numpy array or a list")
    # data may be a list of per-sample arrays (samples can differ spatially);
    # the nominal batch shape is derived from the first sample
    data_shape = tuple([len(data)] + list(data[0].shape))
    data_dtype = data[0].dtype
    # number of spatial dimensions (2 or 3)
    dim = len(data_shape) - 2
    if seg is not None:
        seg_shape = tuple([len(seg)] + list(seg[0].shape))
        seg_dtype = seg[0].dtype
        if not isinstance(seg, (list, tuple, np.ndarray)):
            raise TypeError("data has to be either a numpy array or a list")
        assert all([i == j for i, j in zip(seg_shape[2:], data_shape[2:])]), "data and seg must have the same spatial " \
                                                                             "dimensions. Data: %s, seg: %s" % \
                                                                             (str(data_shape), str(seg_shape))
    # broadcast scalar crop_size / margins to one entry per spatial dimension
    if type(crop_size) not in (tuple, list, np.ndarray):
        crop_size = [crop_size] * dim
    else:
        assert len(crop_size) == len(
            data_shape) - 2, "If you provide a list/tuple as center crop make sure it has the same dimension as your " \
                             "data (2d/3d)"
    if not isinstance(margins, (np.ndarray, tuple, list)):
        margins = [margins] * dim
    data_return = np.zeros([data_shape[0], data_shape[1]] + list(crop_size), dtype=data_dtype)
    if seg is not None:
        seg_return = np.zeros([seg_shape[0], seg_shape[1]] + list(crop_size), dtype=seg_dtype)
    else:
        seg_return = None
    for b in range(data_shape[0]):
        # per-sample shape (samples stored in a list may differ spatially)
        data_shape_here = [data_shape[0]] + list(data[b].shape)
        if seg is not None:
            seg_shape_here = [seg_shape[0]] + list(seg[b].shape)
        if crop_type == "center":
            lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
        elif crop_type == "random":
            lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
        else:
            raise NotImplementedError("crop_type must be either center or random")
        # padding required per axis when the crop window extends past the border
        # (lbs < 0 on the lower side, lbs + crop_size > shape on the upper side)
        need_to_pad = [[0, 0]] + [[abs(min(0, lbs[d])),
                                   abs(min(0, data_shape_here[d + 2] - (lbs[d] + crop_size[d])))]
                                  for d in range(dim)]
        # we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed
        ubs = [min(lbs[d] + crop_size[d], data_shape_here[d+2]) for d in range(dim)]
        lbs = [max(0, lbs[d]) for d in range(dim)]
        slicer_data = [slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
        data_cropped = data[b][tuple(slicer_data)]
        if seg_return is not None:
            slicer_seg = [slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
            seg_cropped = seg[b][tuple(slicer_seg)]
        if any([i > 0 for j in need_to_pad for i in j]):
            data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
            if seg_return is not None:
                seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
        else:
            data_return[b] = data_cropped
            if seg_return is not None:
                seg_return[b] = seg_cropped
    return data_return, seg_return
def random_crop(data, seg=None, crop_size=128, margins=(0, 0, 0)):
    """Randomly crop ``data`` (and optionally ``seg``) to ``crop_size``.

    Thin wrapper around ``crop`` with ``crop_type='random'``.

    :param data: batched image data, shape (b, c, x, y(, z))
    :param seg: optional segmentation with matching spatial dimensions
    :param crop_size: int or per-spatial-dimension sequence of crop sizes
    :param margins: minimum distance of the crop from the image border
    :return: whatever ``crop`` returns: (cropped_data, cropped_seg_or_None)
    """
    # Tuple default replaces the original mutable-list default argument
    # (shared-mutable-default pitfall); crop() accepts tuple/list/ndarray
    # for margins, so callers are unaffected.
    return crop(data, seg, crop_size, margins, 'random')
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant',
                         np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
    """Pad ``data`` and ``seg`` to at least ``new_shape``.

    ``new_shape`` is understood as a minimum shape: dimensions that are
    already large enough stay unchanged. If ``must_be_divisible_by`` is given
    (one int per spatial dimension, same length as ``new_shape``), the target
    shape is enlarged so every spatial dimension becomes divisible by it —
    useful for UNet-like architectures.

    :param data: image array to pad
    :param seg: segmentation array to pad, or None
    :param new_shape: minimum target shape; if None only divisibility applies
    :param must_be_divisible_by: per-spatial-dimension divisibility constraint
    :param pad_mode_data: ``mode`` forwarded to np.pad for data
    :param np_pad_kwargs_data: extra np.pad kwargs for data
    :param pad_mode_seg: ``mode`` forwarded to np.pad for seg
    :param np_pad_kwargs_seg: extra np.pad kwargs for seg
    :return: (padded_data, padded_seg); padded_seg is None when seg is None
    """
    padded_data = pad_nd_image(data, new_shape, mode=pad_mode_data,
                               kwargs=np_pad_kwargs_data, return_slicer=False,
                               shape_must_be_divisible_by=must_be_divisible_by)
    padded_seg = None
    if seg is not None:
        padded_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg,
                                  kwargs=np_pad_kwargs_seg, return_slicer=False,
                                  shape_must_be_divisible_by=must_be_divisible_by)
    return padded_data, padded_seg
|
run_python_tests
|
Runs the Python tests.
Returns:
True if the tests all succeed, False if there are failures.
|
#!/usr/bin/python
import os
import unittest
""" Script to run the Python tests. """
# MASKED: run_python_tests function (lines 11-26)
if __name__ == "__main__":
run_python_tests()
|
def run_python_tests():
    """Discover and run the project's Python tests.

    Test modules are discovered under ``rhodopsin/tests`` relative to the
    directory containing this file.

    Returns:
        True if the tests all succeed, False if there are failures.
    """
    print("Starting tests...")
    loader = unittest.TestLoader()
    # Anchor discovery at this module's directory so the script works
    # regardless of the caller's current working directory.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    suite = loader.discover("rhodopsin/tests", top_level_dir=dir_path)
    test_result = unittest.TextTestRunner(verbosity=2).run(suite)
    # wasSuccessful() is already the boolean we want to report; no need
    # for the original if/return-False/return-True branch.
    return test_result.wasSuccessful()
| 11
| 26
|
#!/usr/bin/python
import os
import unittest
""" Script to run the Python tests. """
def run_python_tests():
    """Runs the Python tests.

    Returns:
        True if the tests all succeed, False if there are failures.
    """
    print("Starting tests...")
    # Discover test modules relative to this file's own directory so the
    # script behaves the same from any working directory.
    here = os.path.dirname(os.path.realpath(__file__))
    suite = unittest.TestLoader().discover("rhodopsin/tests", top_level_dir=here)
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    if outcome.wasSuccessful():
        return True
    return False
if __name__ == "__main__":
run_python_tests()
|
harmonic_mean
|
The `harmonic mean`_ is a kind of average that is calculated as
the reciprocal_ of the arithmetic mean of the reciprocals.
It is appropriate when calculating averages of rates_.
.. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
.. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
.. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)
Equation:
.. math::
H = \frac{n}{\frac{1}{x_1}+\frac{1}{x_2}+\ldots+\frac{1}{x_n}} =
\frac{n}{\sum\limits_{i=1}^n \frac{1}{x_i}}
Args:
x: A list or tuple of numerical objects.
Returns:
A numerical object.
Raises:
TypeError: If the user passes something other than list or tuple.
Examples:
>>> harmonic_mean([1, 2, 4])
1.7142857142857142
>>> harmonic_mean(7)
Traceback (most recent call last):
...
TypeError: harmonic_mean() expects a list or a tuple.
|
"""
Implements harmonic_mean() function.
"""
from .mean import mean
# MASKED: harmonic_mean function (lines 7-46)
|
def harmonic_mean(x):
    """
    The `harmonic mean`_ is a kind of average that is calculated as
    the reciprocal_ of the arithmetic mean of the reciprocals.
    It is appropriate when calculating averages of rates_.
    .. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
    .. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
    .. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)
    Equation:
    .. math::
        H = \\frac{n}{\\frac{1}{x_1}+\\frac{1}{x_2}+\\ldots+\\frac{1}{x_n}} =
        \\frac{n}{\\sum\\limits_{i=1}^n \\frac{1}{x_i}}
    Args:
        x: A list or tuple of numerical objects.
    Returns:
        A numerical object.
    Raises:
        TypeError: If the user passes something other than list or tuple.
    Examples:
        >>> harmonic_mean([1, 2, 4])
        1.7142857142857142
        >>> harmonic_mean(7)
        Traceback (most recent call last):
        ...
        TypeError: harmonic_mean() expects a list or a tuple.
    """
    # isinstance is the idiomatic type check (also accepts list/tuple
    # subclasses, which is backward-compatible here).
    if not isinstance(x, (list, tuple)):
        raise TypeError('harmonic_mean() expects a list or a tuple.')
    # H = 1 / mean(1/x_i); dead commented-out code removed.
    reciprocals = [1 / float(num) for num in x]
    return 1 / mean(reciprocals)
| 7
| 46
|
"""
Implements harmonic_mean() function.
"""
from .mean import mean
def harmonic_mean(x):
    """
    The `harmonic mean`_ is a kind of average that is calculated as
    the reciprocal_ of the arithmetic mean of the reciprocals.
    It is appropriate when calculating averages of rates_.
    .. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
    .. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
    .. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)
    Equation:
    .. math::
        H = \\frac{n}{\\frac{1}{x_1}+\\frac{1}{x_2}+\\ldots+\\frac{1}{x_n}} =
        \\frac{n}{\\sum\\limits_{i=1}^n \\frac{1}{x_i}}
    Args:
        x: A list or tuple of numerical objects.
    Returns:
        A numerical object.
    Raises:
        TypeError: If the user passes something other than list or tuple.
    Examples:
        >>> harmonic_mean([1, 2, 4])
        1.7142857142857142
        >>> harmonic_mean(7)
        Traceback (most recent call last):
        ...
        TypeError: harmonic_mean() expects a list or a tuple.
    """
    if type(x) not in (list, tuple):
        raise TypeError('harmonic_mean() expects a list or a tuple.')
    # Reciprocal of the arithmetic mean of the reciprocals.
    inverses = [1 / float(value) for value in x]
    return 1 / mean(inverses)
|
make_deterministic
|
Make results deterministic. If seed == -1, do not make deterministic.
Running your script in a deterministic way might slow it down.
Note that for some packages (eg: sklearn's PCA) this function is not enough.
|
import torch
import random
import numpy as np
class InfiniteDataLoader(torch.utils.data.DataLoader):
    """DataLoader that never raises StopIteration.

    When the wrapped dataset iterator is exhausted it is transparently
    re-created, so ``next()`` can be called indefinitely — useful for
    iteration-based (rather than epoch-based) training loops.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep a live iterator over the underlying DataLoader.
        self.dataset_iterator = super().__iter__()
    def __iter__(self):
        # The loader is its own (infinite) iterator.
        return self
    def __next__(self):
        try:
            batch = next(self.dataset_iterator)
        except StopIteration:
            # Epoch ended: restart with a fresh iterator and keep going.
            self.dataset_iterator = super().__iter__()
            batch = next(self.dataset_iterator)
        return batch
# MASKED: make_deterministic function (lines 24-37)
def setup_logging(output_folder, exist_ok=False, console="debug",
                  info_filename="info.log", debug_filename="debug.log"):
    """Set up logging files and console output.

    Creates one file for INFO logs and one for DEBUG logs, and installs a
    ``sys.excepthook`` that routes uncaught exceptions into the log.

    Args:
        output_folder (str): creates the folder where to save the files.
        exist_ok (bool): if False throw a FileExistsError if output_folder already exists
        console (str):
            if == "debug" prints on console debug messages and higher
            if == "info" prints on console info messages and higher
            if == None does not use console (useful when a logger has already been set)
        info_filename (str): the name of the info file. if None, don't create info file
        debug_filename (str): the name of the debug file. if None, don't create debug file
    """
    import os
    import sys
    import logging
    import traceback
    if not exist_ok and os.path.exists(output_folder):
        raise FileExistsError(f"{output_folder} already exists!")
    os.makedirs(output_folder, exist_ok=True)
    base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
    # Configure the root logger; the handlers below filter by level.
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    if info_filename is not None:
        info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
        info_file_handler.setLevel(logging.INFO)
        info_file_handler.setFormatter(base_formatter)
        logger.addHandler(info_file_handler)
    if debug_filename is not None:
        debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
        debug_file_handler.setLevel(logging.DEBUG)
        debug_file_handler.setFormatter(base_formatter)
        logger.addHandler(debug_file_handler)
    if console is not None:
        console_handler = logging.StreamHandler()
        if console == "debug":
            console_handler.setLevel(logging.DEBUG)
        if console == "info":
            console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(base_formatter)
        logger.addHandler(console_handler)
    def my_handler(type_, value, tb):
        # Bug fix: pass the parameter `type_` (the exception class) to
        # format_exception — the original passed the builtin `type`.
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
        logging.info("Experiment finished (with some errors)")
    # Route uncaught exceptions into the log files as well.
    sys.excepthook = my_handler
|
def make_deterministic(seed=0):
    """Make results deterministic. If seed == -1, do not make deterministic.
    Running your script in a deterministic way might slow it down.
    Note that for some packages (eg: sklearn's PCA) this function is not enough.
    """
    seed = int(seed)
    if seed == -1:
        # Sentinel: leave all RNGs and cuDNN settings untouched.
        return
    # Seed every RNG the training loop may draw from, in one pass.
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    # Trade cuDNN autotuning speed for reproducible kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
| 24
| 37
|
import torch
import random
import numpy as np
class InfiniteDataLoader(torch.utils.data.DataLoader):
    """A DataLoader whose iterator never ends.

    Exhaustion of the inner dataset iterator is caught and the iterator is
    rebuilt, so callers can draw batches forever without epoch handling.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Current iterator over the parent DataLoader.
        self.dataset_iterator = super().__iter__()
    def __iter__(self):
        return self
    def __next__(self):
        try:
            batch = next(self.dataset_iterator)
        except StopIteration:
            # Dataset pass finished — create a new iterator and retry once.
            self.dataset_iterator = super().__iter__()
            batch = next(self.dataset_iterator)
        return batch
def make_deterministic(seed=0):
    """Seed all RNGs (python, numpy, torch, torch.cuda) for reproducibility.

    If seed == -1, do not make deterministic. Running deterministically may
    slow things down, and for some packages (e.g. sklearn's PCA) this is not
    sufficient.
    """
    seed = int(seed)
    if seed != -1:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Force cuDNN to pick deterministic kernels, not the fastest ones.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def setup_logging(output_folder, exist_ok=False, console="debug",
                  info_filename="info.log", debug_filename="debug.log"):
    """Set up logging files and console output.

    Creates one file for INFO logs and one for DEBUG logs, and installs a
    ``sys.excepthook`` so uncaught exceptions end up in the log files.

    Args:
        output_folder (str): creates the folder where to save the files.
        exist_ok (bool): if False throw a FileExistsError if output_folder already exists
        console (str):
            if == "debug" prints on console debug messages and higher
            if == "info" prints on console info messages and higher
            if == None does not use console (useful when a logger has already been set)
        info_filename (str): the name of the info file. if None, don't create info file
        debug_filename (str): the name of the debug file. if None, don't create debug file
    """
    import os
    import sys
    import logging
    import traceback
    if not exist_ok and os.path.exists(output_folder):
        raise FileExistsError(f"{output_folder} already exists!")
    os.makedirs(output_folder, exist_ok=True)
    base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    if info_filename is not None:
        info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
        info_file_handler.setLevel(logging.INFO)
        info_file_handler.setFormatter(base_formatter)
        logger.addHandler(info_file_handler)
    if debug_filename is not None:
        debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
        debug_file_handler.setLevel(logging.DEBUG)
        debug_file_handler.setFormatter(base_formatter)
        logger.addHandler(debug_file_handler)
    if console is not None:
        console_handler = logging.StreamHandler()
        if console == "debug":
            console_handler.setLevel(logging.DEBUG)
        if console == "info":
            console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(base_formatter)
        logger.addHandler(console_handler)
    def my_handler(type_, value, tb):
        # Bug fix: forward the `type_` parameter, not the builtin `type`,
        # so format_exception receives the real exception class.
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
        logging.info("Experiment finished (with some errors)")
    sys.excepthook = my_handler
|
normalize_imagenet
|
Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
# MASKED: normalize_imagenet function (lines 55-64)
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
''' Conditional batch normalization layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
elif norm_method == 'group_norm':
self.bn = nn.GroupNorm1d(f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
# c is assumed to be of size batch_size x c_dim x T
if len(c.size()) == 2:
c = c.unsqueeze(2)
# Affine mapping
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
''' Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
|
def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.
    Args:
        x (tensor): input images, channel-first with channels (R, G, B)
    '''
    # Per-channel mean/std of the ImageNet training set; the input tensor
    # is cloned so the caller's data is never modified in place.
    out = x.clone()
    for channel, (mu, sigma) in enumerate(((0.485, 0.229),
                                           (0.456, 0.224),
                                           (0.406, 0.225))):
        out[:, channel] = (out[:, channel] - mu) / sigma
    return out
| 55
| 64
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
    r''' ResNet-18 encoder network for image input.

    Wraps a pretrained torchvision ResNet-18 whose final fully-connected
    layer is replaced, exposing the 512-d pooled feature vector, optionally
    followed by a linear projection to ``c_dim``.

    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''
    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        # Loads ImageNet-pretrained weights (downloads on first use).
        self.features = models.resnet18(pretrained=True)
        # Replace the classifier head with identity: output is the 512-d
        # feature vector after global average pooling.
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(512, c_dim)
        elif c_dim == 512:
            # No projection needed when the requested dim matches ResNet's.
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 512 if use_linear is False')
    def forward(self, x):
        if self.normalize:
            # ImageNet mean/std normalization (see normalize_imagenet).
            x = normalize_imagenet(x)
        net = self.features(x)
        out = self.fc(net)
        return out
def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.
    Args:
        x (tensor): input images
    '''
    # ImageNet per-channel statistics, indexed R, G, B.
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    normalized = x.clone()  # avoid mutating the caller's tensor
    for c in range(3):
        normalized[:, c] = (normalized[:, c] - means[c]) / stds[c]
    return normalized
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN) class.

    Maps sampled points (and optional latent code z) to one logit per point,
    conditioning every normalization layer on the latent code c.

    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z (0 disables the z branch)
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.z_dim = z_dim
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        # 1x1 Conv1d acts as a per-point linear layer on (B, dim, T) input.
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file
            # view — presumably imported elsewhere; verify before using
            # legacy=True.
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
    def forward(self, p, z, c, **kwargs):
        # (B, T, dim) -> (B, dim, T) for Conv1d.
        p = p.transpose(1, 2)
        batch_size, D, T = p.size()
        net = self.fc_p(p)
        if self.z_dim != 0:
            # Broadcast the z embedding over all T points.
            net_z = self.fc_z(z).unsqueeze(2)
            net = net + net_z
        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)
        out = self.fc_out(self.actvn(self.bn(net, c)))
        # Drop the singleton channel dim: (B, 1, T) -> (B, T) logits.
        out = out.squeeze(1)
        return out
def get_prior_z(device):
    ''' Returns prior distribution for latent code z.

    Args:
        device (device): pytorch device
    '''
    # z_dim is fixed to 0: the prior is a zero-dimensional standard normal,
    # i.e. the model is effectively deterministic in z.
    z_dim = 0
    loc = torch.zeros(z_dim, device=device)
    scale = torch.ones(z_dim, device=device)
    return dist.Normal(loc, scale)
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.

    Normalizes the input, then applies an affine transform whose gamma/beta
    are predicted from the conditioning code c via 1x1 convolutions.

    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        # gamma/beta are conditioned on c (1x1 conv == per-sample linear map).
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            # affine=False: the affine part comes from conv_gamma/conv_beta.
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # NOTE(review): torch.nn has no GroupNorm1d (only nn.GroupNorm,
            # which also needs num_groups) — this branch would raise
            # AttributeError if selected. Confirm intended behavior.
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero weights + gamma bias of 1 / beta bias of 0 make the layer
        # behave exactly like plain (unconditional) normalization at init.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T
        if len(c.size()) == 2:
            # Promote (B, c_dim) to (B, c_dim, 1) so Conv1d can consume it.
            c = c.unsqueeze(2)
        # Affine mapping
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Batchnorm
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
    ''' Conditional batch normalization-based Resnet block class.

    Two conditioned-norm + ReLU + 1x1-conv stages with a (possibly
    projected) residual connection.

    Args:
        c_dim (int): dimension of latend conditioned code c
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
        norm_method (str): normalization method
        legacy (bool): whether to use legacy blocks
    '''
    def __init__(self, c_dim, size_in, size_h=None, size_out=None,
                 norm_method='batch_norm', legacy=False):
        super().__init__()
        # Attributes
        # Hidden/output sizes default to the input size.
        if size_h is None:
            size_h = size_in
        if size_out is None:
            size_out = size_in
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules
        if not legacy:
            self.bn_0 = CBatchNorm1d(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d(
                c_dim, size_h, norm_method=norm_method)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file
            # view — confirm it is importable before using legacy=True.
            self.bn_0 = CBatchNorm1d_legacy(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d_legacy(
                c_dim, size_h, norm_method=norm_method)
        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()
        if size_in == size_out:
            self.shortcut = None
        else:
            # 1x1 conv projects the residual when sizes differ.
            self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
        # Initialization
        # Zero-init of the last conv makes the block start as identity
        # (plus shortcut), a common trick for stable residual training.
        nn.init.zeros_(self.fc_1.weight)
    def forward(self, x, c):
        net = self.fc_0(self.actvn(self.bn_0(x, c)))
        dx = self.fc_1(self.actvn(self.bn_1(net, c)))
        if self.shortcut is not None:
            x_s = self.shortcut(x)
        else:
            x_s = x
        return x_s + dx
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network class.

    Bundles a ResNet-18 image encoder, a CBN decoder and a (degenerate,
    zero-dimensional) prior over the latent code z. The only constructor
    argument is the torch device; sub-networks are built internally.

    Args:
        device (device): torch device
    '''
    def __init__(self, device):
        super().__init__()
        self.device = device
        # z_dim=0: the decoder ignores z, the model is conditional-only.
        self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
                                    hidden_size=256).to(self.device)
        self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
        self.p0_z = get_prior_z(self.device)
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r
    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.
        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)
        # ELBO = E[log p(occ|z,c)] - KL(q(z) || p(z)).
        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl
        return elbo, rec_error, kl
    def encode_inputs(self, inputs):
        ''' Encodes the input.
        Args:
            inputs (tensor): the input
        '''
        c = self.encoder(inputs)
        return c
    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.
        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        # Decoder outputs logits; wrap them in a Bernoulli distribution.
        logits = self.decoder(p, z, c, **kwargs)
        p_r = dist.Bernoulli(logits=logits)
        return p_r
    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers z.

        With z_dim == 0 the posterior is an empty (zero-dimensional) Normal;
        p, occ and c only determine the batch size.
        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for p
            c (tensor): latent conditioned code c
        '''
        batch_size = p.size(0)
        mean_z = torch.empty(batch_size, 0).to(self.device)
        logstd_z = torch.empty(batch_size, 0).to(self.device)
        q_z = dist.Normal(mean_z, torch.exp(logstd_z))
        return q_z
    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from prior distribution.
        Args:
            size (Size): size of z
            sample (bool): whether to sample
        '''
        if sample:
            z = self.p0_z.sample(size).to(self.device)
        else:
            # Deterministic alternative: broadcast the prior mean to `size`.
            z = self.p0_z.mean.to(self.device)
            z = z.expand(*size, *z.size())
        return z
|
get_prior_z
|
Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN) class.
    Maps 3D point coordinates p (optionally fused with latent code z) to
    per-point occupancy logits; every normalization layer is conditioned
    on the latent code c.
    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.z_dim = z_dim
        # The z embedding is only needed when a latent code is actually used.
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        # Point-wise (1x1 conv) embedding of the input coordinates.
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file;
            # legacy=True would raise NameError unless it is provided elsewhere.
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
        # Final 1x1 conv producing a single occupancy logit per point.
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
    def forward(self, p, z, c, **kwargs):
        # p arrives as (batch, T, dim); Conv1d wants (batch, dim, T).
        p = p.transpose(1, 2)
        batch_size, D, T = p.size()
        net = self.fc_p(p)
        if self.z_dim != 0:
            # Broadcast the z embedding over all T points.
            net_z = self.fc_z(z).unsqueeze(2)
            net = net + net_z
        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)
        out = self.fc_out(self.actvn(self.bn(net, c)))
        # Drop the singleton channel dim -> (batch, T) logits.
        out = out.squeeze(1)
        return out
# MASKED: get_prior_z function (lines 125-137)
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.
    Normalizes the features x with a parameter-free norm layer, then
    applies an affine transform whose scale (gamma) and shift (beta) are
    predicted from the conditioning code c via 1x1 convolutions.
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method; one of 'batch_norm',
            'instance_norm' or 'group_norm'
    Raises:
        ValueError: if norm_method is not one of the supported values
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules: map c -> per-channel gamma/beta (1x1 convolutions).
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # Bug fix: nn.GroupNorm1d does not exist in PyTorch, so this
            # branch previously raised AttributeError at construction.
            # Use nn.GroupNorm with a single group covering all channels
            # (layer-norm-like normalization over the feature dimension).
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero weights plus (ones, zeros) biases make gamma == 1 and
        # beta == 0 at init, so the layer starts as plain normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T; lift a 2-d
        # code to a singleton length so the 1x1 convs can broadcast.
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping predicted from the conditioning code.
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Normalize, then apply the conditional affine transform.
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
    ''' Conditional batch normalization-based Resnet block class.
    Pre-activation residual block (CBN -> ReLU -> 1x1 conv, twice) whose
    normalization layers are conditioned on the latent code c.
    Args:
        c_dim (int): dimension of latend conditioned code c
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
        norm_method (str): normalization method
        legacy (bool): whether to use legacy blocks
    '''
    def __init__(self, c_dim, size_in, size_h=None, size_out=None,
                 norm_method='batch_norm', legacy=False):
        super().__init__()
        # Attributes: hidden/output sizes default to the input size.
        if size_h is None:
            size_h = size_in
        if size_out is None:
            size_out = size_in
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules
        if not legacy:
            self.bn_0 = CBatchNorm1d(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d(
                c_dim, size_h, norm_method=norm_method)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file;
            # legacy=True would raise NameError unless it is provided elsewhere.
            self.bn_0 = CBatchNorm1d_legacy(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d_legacy(
                c_dim, size_h, norm_method=norm_method)
        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        if size_in == size_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
        # Initialization: zero the last conv so the block starts as identity.
        nn.init.zeros_(self.fc_1.weight)
    def forward(self, x, c):
        net = self.fc_0(self.actvn(self.bn_0(x, c)))
        dx = self.fc_1(self.actvn(self.bn_1(net, c)))
        if self.shortcut is not None:
            x_s = self.shortcut(x)
        else:
            x_s = x
        # Residual connection.
        return x_s + dx
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network class.
    Encodes an image into a conditioning code c and decodes 3D query
    points into per-point occupancy distributions.  This variant uses a
    zero-dimensional latent code z (z_dim == 0), so the VAE machinery
    (prior, posterior, KL) is degenerate but kept for API compatibility.
    Args:
        decoder (nn.Module): decoder network
        encoder (nn.Module): encoder network
        p0_z (dist): prior distribution for latent code z
        device (device): torch device
    '''
    def __init__(self, device):
        super().__init__()
        self.device = device
        # z_dim=0: the decoder is conditioned on c only.
        self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
                 hidden_size=256).to(self.device)
        self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
        self.p0_z = get_prior_z(self.device)
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        # With the 0-d prior, z is an empty tensor; kept for API parity.
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r
    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.
        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)
        # Reconstruction term: negative Bernoulli log-likelihood of occ.
        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        # KL term sums over an empty latent dimension, so it is zero here.
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl
        return elbo, rec_error, kl
    def encode_inputs(self, inputs):
        ''' Encodes the input.
        Args:
            input (tensor): the input
        '''
        c = self.encoder(inputs)
        return c
    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.
        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        logits = self.decoder(p, z, c, **kwargs)
        # Bernoulli over logits: per-point occupancy distribution.
        p_r = dist.Bernoulli(logits=logits)
        return p_r
    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers z.
        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for occ
            c (tensor): latent conditioned code c
        '''
        # Degenerate posterior: empty (batch, 0) mean/log-std tensors,
        # matching the 0-d prior.  p, occ and c are intentionally unused.
        batch_size = p.size(0)
        mean_z = torch.empty(batch_size, 0).to(self.device)
        logstd_z = torch.empty(batch_size, 0).to(self.device)
        q_z = dist.Normal(mean_z, torch.exp(logstd_z))
        return q_z
    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from prior distribution.
        Args:
            size (Size): size of z
            sample (bool): whether to sample
        '''
        if sample:
            z = self.p0_z.sample(size).to(self.device)
        else:
            # Deterministic path: broadcast the prior mean to the batch size.
            z = self.p0_z.mean.to(self.device)
            z = z.expand(*size, *z.size())
        return z
|
def get_prior_z(device):
    ''' Returns prior distribution for latent code z.
    The latent dimension is fixed to zero for this model variant, so the
    returned Normal is a degenerate (empty) standard normal.
    Args:
        device (device): pytorch device the prior parameters live on
    Returns:
        dist.Normal: standard normal over a 0-dimensional latent space
    '''
    latent_dim = 0  # this model uses no latent code z
    loc = torch.zeros(latent_dim, device=device)
    scale = torch.ones(latent_dim, device=device)
    return dist.Normal(loc, scale)
| 125
| 137
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.
    Normalizes the features x with a parameter-free norm layer, then
    applies an affine transform whose scale (gamma) and shift (beta) are
    predicted from the conditioning code c via 1x1 convolutions.
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method; one of 'batch_norm',
            'instance_norm' or 'group_norm'
    Raises:
        ValueError: if norm_method is not one of the supported values
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules: map c -> per-channel gamma/beta (1x1 convolutions).
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # Bug fix: nn.GroupNorm1d does not exist in PyTorch, so this
            # branch previously raised AttributeError at construction.
            # Use nn.GroupNorm with a single group covering all channels
            # (layer-norm-like normalization over the feature dimension).
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero weights plus (ones, zeros) biases make gamma == 1 and
        # beta == 0 at init, so the layer starts as plain normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T; lift a 2-d
        # code to a singleton length so the 1x1 convs can broadcast.
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping predicted from the conditioning code.
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Normalize, then apply the conditional affine transform.
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
''' Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
|
forward
|
Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.
    Normalizes the features x with a parameter-free norm layer, then
    applies an affine transform whose scale (gamma) and shift (beta) are
    predicted from the conditioning code c via 1x1 convolutions.
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method; one of 'batch_norm',
            'instance_norm' or 'group_norm'
    Raises:
        ValueError: if norm_method is not one of the supported values
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules: map c -> per-channel gamma/beta (1x1 convolutions).
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # Bug fix: nn.GroupNorm1d does not exist in PyTorch, so this
            # branch previously raised AttributeError at construction.
            # Use nn.GroupNorm with a single group covering all channels
            # (layer-norm-like normalization over the feature dimension).
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero weights plus (ones, zeros) biases make gamma == 1 and
        # beta == 0 at init, so the layer starts as plain normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T; lift a 2-d
        # code to a singleton length so the 1x1 convs can broadcast.
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping predicted from the conditioning code.
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Normalize, then apply the conditional affine transform.
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
# MASKED: forward function (lines 267-278)
def compute_elbo(self, p, occ, inputs, **kwargs):
''' Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
|
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Encodes the conditioning input into a code c, draws (or takes the
        mean of) a latent z from the prior, and decodes the query points
        into per-point occupancy distributions.
        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        # NOTE(review): with the 0-d prior used by this model variant, z is
        # an empty tensor; it is kept only for interface compatibility.
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r
| 267
| 278
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.
    Normalizes the features x with a parameter-free norm layer, then
    applies an affine transform whose scale (gamma) and shift (beta) are
    predicted from the conditioning code c via 1x1 convolutions.
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method; one of 'batch_norm',
            'instance_norm' or 'group_norm'
    Raises:
        ValueError: if norm_method is not one of the supported values
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules: map c -> per-channel gamma/beta (1x1 convolutions).
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # Bug fix: nn.GroupNorm1d does not exist in PyTorch, so this
            # branch previously raised AttributeError at construction.
            # Use nn.GroupNorm with a single group covering all channels
            # (layer-norm-like normalization over the feature dimension).
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero weights plus (ones, zeros) biases make gamma == 1 and
        # beta == 0 at init, so the layer starts as plain normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T; lift a 2-d
        # code to a singleton length so the 1x1 convs can broadcast.
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping predicted from the conditioning code.
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Normalize, then apply the conditional affine transform.
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network: image-conditioned implicit occupancy model.

    The ResNet-18 encoder, the CBN decoder and the prior p0_z over the
    (here zero-dimensional, i.e. disabled) latent code z are built
    internally.

    Args:
        device (device): torch device
    '''
    def __init__(self, device):
        super().__init__()
        self.device = device
        self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
                                 hidden_size=256).to(self.device)
        self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
        self.p0_z = get_prior_z(self.device)
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r
    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.
        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)
        # Reconstruction term: negative Bernoulli log-likelihood of occupancy.
        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        # Regularization term: KL between approximate posterior and prior.
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl
        return elbo, rec_error, kl
    def encode_inputs(self, inputs):
        ''' Encodes the input.
        Args:
            input (tensor): the input
        '''
        c = self.encoder(inputs)
        return c
    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.
        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        logits = self.decoder(p, z, c, **kwargs)
        # Wrap logits in a Bernoulli so callers get probs/log_prob for free.
        p_r = dist.Bernoulli(logits=logits)
        return p_r
    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers z.
        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for occ
            c (tensor): latent conditioned code c
        '''
        # Degenerate (zero-dimensional) posterior, matching z_dim == 0.
        batch_size = p.size(0)
        mean_z = torch.empty(batch_size, 0).to(self.device)
        logstd_z = torch.empty(batch_size, 0).to(self.device)
        q_z = dist.Normal(mean_z, torch.exp(logstd_z))
        return q_z
    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from prior distribution.
        Args:
            size (Size): size of z
            sample (bool): whether to sample
        '''
        if sample:
            z = self.p0_z.sample(size).to(self.device)
        else:
            # Deterministic path: broadcast the prior mean to the batch size.
            z = self.p0_z.mean.to(self.device)
            z = z.expand(*size, *z.size())
        return z
|
compute_elbo
|
Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
    r''' ResNet-18 encoder network for image input.
    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''
    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        # ImageNet-pretrained backbone; the classifier head is replaced by an
        # empty Sequential so `features` outputs the 512-d pooled embedding.
        self.features = models.resnet18(pretrained=True)
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(512, c_dim)
        elif c_dim == 512:
            # Requested width matches the backbone's — pass through unchanged.
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 512 if use_linear is False')
    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        net = self.features(x)
        out = self.fc(net)
        return out
def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.
    Args:
        x (tensor): input images
    '''
    # Clone so the caller's tensor is left unmodified.
    x = x.clone()
    # Per-channel (R, G, B) mean/std used by torchvision's pretrained models.
    x[:, 0] = (x[:, 0] - 0.485) / 0.229
    x[:, 1] = (x[:, 1] - 0.456) / 0.224
    x[:, 2] = (x[:, 2] - 0.406) / 0.225
    return x
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN) class.
    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.z_dim = z_dim
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        # Point coordinates are lifted to hidden_size with a 1x1 conv.
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        # Five conditioned residual blocks at constant width.
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file;
            # legacy=True would raise NameError here — confirm elsewhere.
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
    def forward(self, p, z, c, **kwargs):
        # p arrives as (batch, T, dim); Conv1d expects (batch, dim, T).
        p = p.transpose(1, 2)
        batch_size, D, T = p.size()
        net = self.fc_p(p)
        if self.z_dim != 0:
            net_z = self.fc_z(z).unsqueeze(2)
            net = net + net_z
        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)
        # Final norm -> activation -> 1x1 conv yields one logit per point.
        out = self.fc_out(self.actvn(self.bn(net, c)))
        out = out.squeeze(1)
        return out
def get_prior_z(device):
    ''' Returns prior distribution for latent code z.

    Args:
        device (device): pytorch device
    '''
    # z_dim is hard-coded to 0: the latent code is effectively disabled,
    # so the "prior" is a zero-dimensional standard Normal.
    z_dim = 0
    p0_z = dist.Normal(
        torch.zeros(z_dim, device = device),
        torch.ones(z_dim, device = device)
    )
    return p0_z
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        # 1x1 convs map the conditioning code c to per-channel scale/shift.
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # NOTE(review): torch.nn has no GroupNorm1d attribute, so this
            # branch raises AttributeError if selected — nn.GroupNorm was
            # probably intended; confirm before using 'group_norm'.
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero conv weights with gamma-bias 1 / beta-bias 0 make the layer
        # start out as plain (unconditional) normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Batchnorm
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
    ''' Conditional batch normalization-based Resnet block class.
    Args:
        c_dim (int): dimension of latend conditioned code c
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
        norm_method (str): normalization method
        legacy (bool): whether to use legacy blocks
    '''
    def __init__(self, c_dim, size_in, size_h=None, size_out=None,
                 norm_method='batch_norm', legacy=False):
        super().__init__()
        # Attributes
        # Hidden/output widths default to the input width.
        if size_h is None:
            size_h = size_in
        if size_out is None:
            size_out = size_in
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules
        if not legacy:
            self.bn_0 = CBatchNorm1d(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d(
                c_dim, size_h, norm_method=norm_method)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file;
            # legacy=True would raise NameError here.
            self.bn_0 = CBatchNorm1d_legacy(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d_legacy(
                c_dim, size_h, norm_method=norm_method)
        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()
        # Identity skip when widths match, 1x1 projection otherwise.
        if size_in == size_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
        # Initialization
        # Zeroing the last conv's weight keeps the residual branch small, so
        # the block starts close to the identity mapping.
        nn.init.zeros_(self.fc_1.weight)
    def forward(self, x, c):
        # Pre-activation residual: norm -> ReLU -> 1x1 conv, twice.
        net = self.fc_0(self.actvn(self.bn_0(x, c)))
        dx = self.fc_1(self.actvn(self.bn_1(net, c)))
        if self.shortcut is not None:
            x_s = self.shortcut(x)
        else:
            x_s = x
        return x_s + dx
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network: image-conditioned implicit occupancy model.

    The ResNet-18 encoder, the CBN decoder and the prior p0_z over the
    (here zero-dimensional, i.e. disabled) latent code z are built
    internally.

    Args:
        device (device): torch device
    '''
    def __init__(self, device):
        super().__init__()
        self.device = device
        self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
                                 hidden_size=256).to(self.device)
        self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
        self.p0_z = get_prior_z(self.device)
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r
    # MASKED: compute_elbo function (lines 280-296)
    def encode_inputs(self, inputs):
        ''' Encodes the input.
        Args:
            input (tensor): the input
        '''
        c = self.encoder(inputs)
        return c
    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.
        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        logits = self.decoder(p, z, c, **kwargs)
        # Wrap logits in a Bernoulli so callers get probs/log_prob for free.
        p_r = dist.Bernoulli(logits=logits)
        return p_r
    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers z.
        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for occ
            c (tensor): latent conditioned code c
        '''
        # Degenerate (zero-dimensional) posterior, matching z_dim == 0.
        batch_size = p.size(0)
        mean_z = torch.empty(batch_size, 0).to(self.device)
        logstd_z = torch.empty(batch_size, 0).to(self.device)
        q_z = dist.Normal(mean_z, torch.exp(logstd_z))
        return q_z
    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from prior distribution.
        Args:
            size (Size): size of z
            sample (bool): whether to sample
        '''
        if sample:
            z = self.p0_z.sample(size).to(self.device)
        else:
            # Deterministic path: broadcast the prior mean to the batch size.
            z = self.p0_z.mean.to(self.device)
            z = z.expand(*size, *z.size())
        return z
|
    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.
        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)
        # Reconstruction term: negative Bernoulli log-likelihood of occupancy.
        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        # Regularization term: KL between approximate posterior and prior.
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl
        return elbo, rec_error, kl
| 280
| 296
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
    r''' ResNet-18 image encoder.

    Wraps an ImageNet-pretrained ResNet-18 whose classifier head is removed,
    optionally followed by a linear projection to ``c_dim``.

    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''
    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        # Strip the classification head so the backbone emits the 512-d
        # pooled feature vector.
        self.features = models.resnet18(pretrained=True)
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(512, c_dim)
        else:
            if c_dim != 512:
                raise ValueError('c_dim must be 512 if use_linear is False')
            # Widths already agree — identity head.
            self.fc = nn.Sequential()
    def forward(self, x):
        inp = normalize_imagenet(x) if self.normalize else x
        return self.fc(self.features(inp))
def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.

    Operates on a clone, so the caller's tensor is left untouched.

    Args:
        x (tensor): input images
    '''
    # Per-channel (R, G, B) statistics used by torchvision's pretrained models.
    channel_stats = ((0.485, 0.229), (0.456, 0.224), (0.406, 0.225))
    out = x.clone()
    for channel, (mean, std) in enumerate(channel_stats):
        out[:, channel] = (out[:, channel] - mean) / std
    return out
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN).

    Maps sampled 3D points, a latent code z and a conditioning code c to one
    occupancy logit per point via five conditioned residual blocks.

    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.z_dim = z_dim
        if z_dim != 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        # Lift point coordinates to hidden_size with a 1x1 conv.
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        # Five conditioned residual blocks at constant width.
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.bn = (CBatchNorm1d(c_dim, hidden_size) if not legacy
                   else CBatchNorm1d_legacy(c_dim, hidden_size))
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        self.actvn = F.relu if not leaky else (lambda x: F.leaky_relu(x, 0.2))
    def forward(self, p, z, c, **kwargs):
        # p arrives as (batch, T, dim); Conv1d expects (batch, dim, T).
        net = self.fc_p(p.transpose(1, 2))
        if self.z_dim != 0:
            net = net + self.fc_z(z).unsqueeze(2)
        for block in (self.block0, self.block1, self.block2,
                      self.block3, self.block4):
            net = block(net, c)
        # Final norm -> activation -> 1x1 conv, then drop the channel axis.
        return self.fc_out(self.actvn(self.bn(net, c))).squeeze(1)
def get_prior_z(device):
    ''' Returns the prior distribution for latent code z.

    With z_dim fixed to 0 the latent code is effectively disabled: the prior
    is a zero-dimensional standard Normal.

    Args:
        device (device): pytorch device
    '''
    z_dim = 0
    loc = torch.zeros(z_dim, device=device)
    scale = torch.ones(z_dim, device=device)
    return dist.Normal(loc, scale)
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.

    Normalizes x (without affine parameters), then rescales with a
    per-channel gamma/beta predicted from the conditioning code c.

    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method, one of
            'batch_norm', 'instance_norm', 'group_norm'
    Raises:
        ValueError: if norm_method is not one of the supported values
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        # 1x1 convs map the conditioning code c to per-channel scale/shift.
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # BUGFIX: torch.nn has no GroupNorm1d (the original call raised
            # AttributeError). Use nn.GroupNorm with a single group, which
            # normalizes over all f_dim channels jointly.
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero conv weights with gamma-bias 1 / beta-bias 0 make the layer
        # start out as plain (unconditional) normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T; lift a 2-D code
        # to a length-1 time axis so the 1x1 convs broadcast over T.
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping predicted from the conditioning code.
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Normalize, then apply the conditional affine transform.
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
    ''' Conditional batch normalization-based Resnet block.

    Pre-activation residual block whose normalization layers are conditioned
    on a latent code c.

    Args:
        c_dim (int): dimension of latend conditioned code c
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
        norm_method (str): normalization method
        legacy (bool): whether to use legacy blocks
    '''
    def __init__(self, c_dim, size_in, size_h=None, size_out=None,
                 norm_method='batch_norm', legacy=False):
        super().__init__()
        # Attributes: hidden/output widths default to the input width.
        size_h = size_in if size_h is None else size_h
        size_out = size_in if size_out is None else size_out
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules: pick the conditional-norm implementation once.
        norm_cls = CBatchNorm1d if not legacy else CBatchNorm1d_legacy
        self.bn_0 = norm_cls(c_dim, size_in, norm_method=norm_method)
        self.bn_1 = norm_cls(c_dim, size_h, norm_method=norm_method)
        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()
        # Identity skip when widths match, 1x1 projection otherwise.
        self.shortcut = (None if size_in == size_out
                         else nn.Conv1d(size_in, size_out, 1, bias=False))
        # Zero-init the last conv's weight so the residual branch starts
        # small and the block is close to identity at initialization.
        nn.init.zeros_(self.fc_1.weight)
    def forward(self, x, c):
        # norm -> ReLU -> 1x1 conv, twice.
        hidden = self.fc_0(self.actvn(self.bn_0(x, c)))
        delta = self.fc_1(self.actvn(self.bn_1(hidden, c)))
        skip = x if self.shortcut is None else self.shortcut(x)
        return skip + delta
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network: image-conditioned implicit occupancy model.

    Builds its own ResNet-18 encoder, CBN decoder and the prior p0_z over
    the (here zero-dimensional, i.e. disabled) latent code z.

    Args:
        device (device): torch device
    '''
    def __init__(self, device):
        super().__init__()
        self.device = device
        self.decoder = DecoderCBatchNorm(
            dim=3, z_dim=0, c_dim=256, hidden_size=256).to(device)
        self.encoder = Resnet18(256, normalize=True, use_linear=True).to(device)
        self.p0_z = get_prior_z(device)
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.

        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z
        '''
        c = self.encode_inputs(inputs)
        z = self.get_z_from_prior((p.size(0),), sample=sample)
        return self.decode(p, z, c, **kwargs)
    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.

        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)
        # Reconstruction: negative Bernoulli log-likelihood of occupancy.
        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        # Regularization: KL between approximate posterior and prior.
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        return -rec_error - kl, rec_error, kl
    def encode_inputs(self, inputs):
        ''' Encodes the input.

        Args:
            inputs (tensor): the input
        '''
        return self.encoder(inputs)
    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.

        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        # Wrap logits in a Bernoulli so callers get probs/log_prob for free.
        return dist.Bernoulli(logits=self.decoder(p, z, c, **kwargs))
    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers z.

        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for occ
            c (tensor): latent conditioned code c
        '''
        # Degenerate (zero-dimensional) posterior, matching z_dim == 0.
        n = p.size(0)
        mean_z = torch.empty(n, 0).to(self.device)
        logstd_z = torch.empty(n, 0).to(self.device)
        return dist.Normal(mean_z, torch.exp(logstd_z))
    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from prior distribution.

        Args:
            size (Size): size of z
            sample (bool): whether to sample
        '''
        if sample:
            return self.p0_z.sample(size).to(self.device)
        # Deterministic path: broadcast the prior mean to the batch size.
        z = self.p0_z.mean.to(self.device)
        return z.expand(*size, *z.size())
|
decode
|
Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
    r''' ResNet-18 encoder network for image input.
    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''
    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        # ImageNet-pretrained backbone; the classifier head is replaced by an
        # empty Sequential so `features` outputs the 512-d pooled embedding.
        self.features = models.resnet18(pretrained=True)
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(512, c_dim)
        elif c_dim == 512:
            # Requested width matches the backbone's — pass through unchanged.
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 512 if use_linear is False')
    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        net = self.features(x)
        out = self.fc(net)
        return out
def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.
    Args:
        x (tensor): input images
    '''
    # Clone so the caller's tensor is left unmodified.
    x = x.clone()
    # Per-channel (R, G, B) mean/std used by torchvision's pretrained models.
    x[:, 0] = (x[:, 0] - 0.485) / 0.229
    x[:, 1] = (x[:, 1] - 0.456) / 0.224
    x[:, 2] = (x[:, 2] - 0.406) / 0.225
    return x
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN) class.
    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.z_dim = z_dim
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        # Point coordinates are lifted to hidden_size with a 1x1 conv.
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        # Five conditioned residual blocks at constant width.
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file;
            # legacy=True would raise NameError here — confirm elsewhere.
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
    def forward(self, p, z, c, **kwargs):
        # p arrives as (batch, T, dim); Conv1d expects (batch, dim, T).
        p = p.transpose(1, 2)
        batch_size, D, T = p.size()
        net = self.fc_p(p)
        if self.z_dim != 0:
            net_z = self.fc_z(z).unsqueeze(2)
            net = net + net_z
        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)
        # Final norm -> activation -> 1x1 conv yields one logit per point.
        out = self.fc_out(self.actvn(self.bn(net, c)))
        out = out.squeeze(1)
        return out
def get_prior_z(device):
    ''' Returns prior distribution for latent code z.

    Args:
        device (device): pytorch device
    '''
    # z_dim is hard-coded to 0: the latent code is effectively disabled,
    # so the "prior" is a zero-dimensional standard Normal.
    z_dim = 0
    p0_z = dist.Normal(
        torch.zeros(z_dim, device = device),
        torch.ones(z_dim, device = device)
    )
    return p0_z
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        # 1x1 convs map the conditioning code c to per-channel scale/shift.
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # NOTE(review): torch.nn has no GroupNorm1d attribute, so this
            # branch raises AttributeError if selected — nn.GroupNorm was
            # probably intended; confirm before using 'group_norm'.
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero conv weights with gamma-bias 1 / beta-bias 0 make the layer
        # start out as plain (unconditional) normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Batchnorm
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
    ''' Conditional batch normalization-based Resnet block class.
    Args:
        c_dim (int): dimension of latend conditioned code c
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
        norm_method (str): normalization method
        legacy (bool): whether to use legacy blocks
    '''
    def __init__(self, c_dim, size_in, size_h=None, size_out=None,
                 norm_method='batch_norm', legacy=False):
        super().__init__()
        # Attributes
        # Hidden/output widths default to the input width.
        if size_h is None:
            size_h = size_in
        if size_out is None:
            size_out = size_in
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules
        if not legacy:
            self.bn_0 = CBatchNorm1d(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d(
                c_dim, size_h, norm_method=norm_method)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file;
            # legacy=True would raise NameError here.
            self.bn_0 = CBatchNorm1d_legacy(
                c_dim, size_in, norm_method=norm_method)
            self.bn_1 = CBatchNorm1d_legacy(
                c_dim, size_h, norm_method=norm_method)
        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()
        # Identity skip when widths match, 1x1 projection otherwise.
        if size_in == size_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
        # Initialization
        # Zeroing the last conv's weight keeps the residual branch small, so
        # the block starts close to the identity mapping.
        nn.init.zeros_(self.fc_1.weight)
    def forward(self, x, c):
        # Pre-activation residual: norm -> ReLU -> 1x1 conv, twice.
        net = self.fc_0(self.actvn(self.bn_0(x, c)))
        dx = self.fc_1(self.actvn(self.bn_1(net, c)))
        if self.shortcut is not None:
            x_s = self.shortcut(x)
        else:
            x_s = x
        return x_s + dx
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network: image-conditioned implicit occupancy model.

    The ResNet-18 encoder, the CBN decoder and the prior p0_z over the
    (here zero-dimensional, i.e. disabled) latent code z are built
    internally.

    Args:
        device (device): torch device
    '''
    def __init__(self, device):
        super().__init__()
        self.device = device
        self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
                                 hidden_size=256).to(self.device)
        self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
        self.p0_z = get_prior_z(self.device)
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r
    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.
        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)
        # Reconstruction term: negative Bernoulli log-likelihood of occupancy.
        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        # Regularization term: KL between approximate posterior and prior.
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl
        return elbo, rec_error, kl
    def encode_inputs(self, inputs):
        ''' Encodes the input.
        Args:
            input (tensor): the input
        '''
        c = self.encoder(inputs)
        return c
    # MASKED: decode function (lines 308-318)
    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers z.
        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for occ
            c (tensor): latent conditioned code c
        '''
        # Degenerate (zero-dimensional) posterior, matching z_dim == 0.
        batch_size = p.size(0)
        mean_z = torch.empty(batch_size, 0).to(self.device)
        logstd_z = torch.empty(batch_size, 0).to(self.device)
        q_z = dist.Normal(mean_z, torch.exp(logstd_z))
        return q_z
    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from prior distribution.
        Args:
            size (Size): size of z
            sample (bool): whether to sample
        '''
        if sample:
            z = self.p0_z.sample(size).to(self.device)
        else:
            # Deterministic path: broadcast the prior mean to the batch size.
            z = self.p0_z.mean.to(self.device)
            z = z.expand(*size, *z.size())
        return z
|
    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.
        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        logits = self.decoder(p, z, c, **kwargs)
        # Wrap logits in a Bernoulli so callers get probs/log_prob for free.
        p_r = dist.Bernoulli(logits=logits)
        return p_r
| 308
| 318
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
    r''' ResNet-18 encoder network for image input.
    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''
    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        # ImageNet-pretrained backbone; the classifier head is replaced by an
        # empty Sequential so `features` outputs the 512-d pooled embedding.
        self.features = models.resnet18(pretrained=True)
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(512, c_dim)
        elif c_dim == 512:
            # Requested width matches the backbone's — pass through unchanged.
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 512 if use_linear is False')
    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        net = self.features(x)
        out = self.fc(net)
        return out
def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.
    Args:
        x (tensor): input images
    '''
    # Clone so the caller's tensor is left unmodified.
    x = x.clone()
    # Per-channel (R, G, B) mean/std used by torchvision's pretrained models.
    x[:, 0] = (x[:, 0] - 0.485) / 0.229
    x[:, 1] = (x[:, 1] - 0.456) / 0.224
    x[:, 2] = (x[:, 2] - 0.406) / 0.225
    return x
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN) class.
    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.z_dim = z_dim
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        # Point coordinates are lifted to hidden_size with a 1x1 conv.
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        # Five conditioned residual blocks at constant width.
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            # NOTE(review): CBatchNorm1d_legacy is not defined in this file;
            # legacy=True would raise NameError here — confirm elsewhere.
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
    def forward(self, p, z, c, **kwargs):
        # p arrives as (batch, T, dim); Conv1d expects (batch, dim, T).
        p = p.transpose(1, 2)
        batch_size, D, T = p.size()
        net = self.fc_p(p)
        if self.z_dim != 0:
            net_z = self.fc_z(z).unsqueeze(2)
            net = net + net_z
        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)
        # Final norm -> activation -> 1x1 conv yields one logit per point.
        out = self.fc_out(self.actvn(self.bn(net, c)))
        out = out.squeeze(1)
        return out
def get_prior_z(device):
    ''' Returns prior distribution for latent code z.

    Args:
        device (device): pytorch device
    '''
    # z_dim is hard-coded to 0: the latent code is effectively disabled,
    # so the "prior" is a zero-dimensional standard Normal.
    z_dim = 0
    p0_z = dist.Normal(
        torch.zeros(z_dim, device = device),
        torch.ones(z_dim, device = device)
    )
    return p0_z
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): normalization method
    '''
    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        # 1x1 convs map the conditioning code c to per-channel scale/shift.
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # NOTE(review): torch.nn has no GroupNorm1d attribute, so this
            # branch raises AttributeError if selected — nn.GroupNorm was
            # probably intended; confirm before using 'group_norm'.
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()
    def reset_parameters(self):
        # Zero conv weights with gamma-bias 1 / beta-bias 0 make the layer
        # start out as plain (unconditional) normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)
    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)
        # c is assumed to be of size batch_size x c_dim x T
        if len(c.size()) == 2:
            c = c.unsqueeze(2)
        # Affine mapping
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)
        # Batchnorm
        net = self.bn(x)
        out = gamma * net + beta
        return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
''' Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
|
infer_z
|
Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
''' Conditional batch normalization layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
elif norm_method == 'group_norm':
self.bn = nn.GroupNorm1d(f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
# c is assumed to be of size batch_size x c_dim x T
if len(c.size()) == 2:
c = c.unsqueeze(2)
# Affine mapping
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
''' Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
# MASKED: infer_z function (lines 320-334)
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
|
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
| 320
| 334
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
''' Conditional batch normalization layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
elif norm_method == 'group_norm':
self.bn = nn.GroupNorm1d(f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
# c is assumed to be of size batch_size x c_dim x T
if len(c.size()) == 2:
c = c.unsqueeze(2)
# Affine mapping
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
''' Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
|
register
|
Registers the given model with the given admin class. Once a model is
registered in self.registry, we also add it to app registries in
self.apps.
If no model_admin is passed, it will use ModelAdmin2. If keyword
arguments are given they will be passed to the admin class on
instantiation.
If a model is already registered, this will raise ImproperlyConfigured.
|
"""
WARNING: This file about to undergo major refactoring by @pydanny per
Issue #99.
"""
from importlib import import_module
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from . import apiviews
from . import types
from . import utils
from . import views
class Admin2(object):
"""
The base Admin2 object.
It keeps a registry of all registered Models and collects the urls of their
related ModelAdmin2 instances.
It also provides an index view that serves as an entry point to the
admin site.
"""
index_view = views.IndexView
login_view = views.LoginView
app_index_view = views.AppIndexView
api_index_view = apiviews.IndexAPIView
def __init__(self, name='admin2'):
self.registry = {}
self.apps = {}
self.app_verbose_names = {}
self.name = name
# MASKED: register function (lines 37-61)
def deregister(self, model):
"""
Deregisters the given model. Remove the model from the self.app as well
If the model is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.registry[model]
except KeyError:
raise ImproperlyConfigured(
'%s was never registered in django-admin2' % model)
# Remove the model from the apps registry
# Get the app label
app_label = utils.model_options(model).app_label
# Delete the model from it's app registry
del self.apps[app_label][model]
# if no more models in an app's registry
# then delete the app from the apps.
if self.apps[app_label] is {}:
del self.apps[app_label] # no
def register_app_verbose_name(self, app_label, app_verbose_name):
"""
Registers the given app label with the given app verbose name.
If a app_label is already registered, this will raise
ImproperlyConfigured.
"""
if app_label in self.app_verbose_names:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % app_label)
self.app_verbose_names[app_label] = app_verbose_name
def deregister_app_verbose_name(self, app_label):
"""
Deregisters the given app label. Remove the app label from the
self.app_verbose_names as well.
If the app label is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.app_verbose_names[app_label]
except KeyError:
raise ImproperlyConfigured(
'%s app label was never registered in django-admin2' % app_label)
def autodiscover(self):
"""
Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by
trying to import them.
"""
for app_name in [x for x in settings.INSTALLED_APPS]:
try:
import_module("%s.admin2" % app_name)
except ImportError as e:
if str(e).startswith("No module named") and 'admin2' in str(e):
continue
raise e
def get_admin_by_name(self, name):
"""
Returns the admin instance that was registered with the passed in
name.
"""
for object_admin in self.registry.values():
if object_admin.name == name:
return object_admin
raise ValueError(
u'No object admin found with name {}'.format(repr(name)))
def get_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
'login_view': self.login_view,
}
def get_app_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_api_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_urls(self):
urlpatterns = [
url(regex=r'^$',
view=self.index_view.as_view(**self.get_index_kwargs()),
name='dashboard'
),
url(regex=r'^auth/user/(?P<pk>\d+)/update/password/$',
view=views.PasswordChangeView.as_view(),
name='password_change'
),
url(regex='^password_change_done/$',
view=views.PasswordChangeDoneView.as_view(),
name='password_change_done'
),
url(regex='^logout/$',
view=views.LogoutView.as_view(),
name='logout'
),
url(regex=r'^(?P<app_label>\w+)/$',
view=self.app_index_view.as_view(
**self.get_app_index_kwargs()),
name='app_index'
),
url(regex=r'^api/v0/$',
view=self.api_index_view.as_view(
**self.get_api_index_kwargs()),
name='api_index'
),
]
for model, model_admin in self.registry.items():
model_options = utils.model_options(model)
urlpatterns += [
url('^{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.urls),
url('^api/v0/{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.api_urls),
]
return urlpatterns
@property
def urls(self):
# We set the application and instance namespace here
return self.get_urls(), self.name, self.name
|
def register(self, model, model_admin=None, **kwargs):
"""
Registers the given model with the given admin class. Once a model is
registered in self.registry, we also add it to app registries in
self.apps.
If no model_admin is passed, it will use ModelAdmin2. If keyword
arguments are given they will be passed to the admin class on
instantiation.
If a model is already registered, this will raise ImproperlyConfigured.
"""
if model in self.registry:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % model)
if not model_admin:
model_admin = types.ModelAdmin2
self.registry[model] = model_admin(model, admin=self, **kwargs)
# Add the model to the apps registry
app_label = utils.model_options(model).app_label
if app_label in self.apps.keys():
self.apps[app_label][model] = self.registry[model]
else:
self.apps[app_label] = {model: self.registry[model]}
| 37
| 61
|
"""
WARNING: This file about to undergo major refactoring by @pydanny per
Issue #99.
"""
from importlib import import_module
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from . import apiviews
from . import types
from . import utils
from . import views
class Admin2(object):
"""
The base Admin2 object.
It keeps a registry of all registered Models and collects the urls of their
related ModelAdmin2 instances.
It also provides an index view that serves as an entry point to the
admin site.
"""
index_view = views.IndexView
login_view = views.LoginView
app_index_view = views.AppIndexView
api_index_view = apiviews.IndexAPIView
def __init__(self, name='admin2'):
self.registry = {}
self.apps = {}
self.app_verbose_names = {}
self.name = name
def register(self, model, model_admin=None, **kwargs):
"""
Registers the given model with the given admin class. Once a model is
registered in self.registry, we also add it to app registries in
self.apps.
If no model_admin is passed, it will use ModelAdmin2. If keyword
arguments are given they will be passed to the admin class on
instantiation.
If a model is already registered, this will raise ImproperlyConfigured.
"""
if model in self.registry:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % model)
if not model_admin:
model_admin = types.ModelAdmin2
self.registry[model] = model_admin(model, admin=self, **kwargs)
# Add the model to the apps registry
app_label = utils.model_options(model).app_label
if app_label in self.apps.keys():
self.apps[app_label][model] = self.registry[model]
else:
self.apps[app_label] = {model: self.registry[model]}
def deregister(self, model):
"""
Deregisters the given model. Remove the model from the self.app as well
If the model is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.registry[model]
except KeyError:
raise ImproperlyConfigured(
'%s was never registered in django-admin2' % model)
# Remove the model from the apps registry
# Get the app label
app_label = utils.model_options(model).app_label
# Delete the model from it's app registry
del self.apps[app_label][model]
# if no more models in an app's registry
# then delete the app from the apps.
if self.apps[app_label] is {}:
del self.apps[app_label] # no
def register_app_verbose_name(self, app_label, app_verbose_name):
"""
Registers the given app label with the given app verbose name.
If a app_label is already registered, this will raise
ImproperlyConfigured.
"""
if app_label in self.app_verbose_names:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % app_label)
self.app_verbose_names[app_label] = app_verbose_name
def deregister_app_verbose_name(self, app_label):
"""
Deregisters the given app label. Remove the app label from the
self.app_verbose_names as well.
If the app label is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.app_verbose_names[app_label]
except KeyError:
raise ImproperlyConfigured(
'%s app label was never registered in django-admin2' % app_label)
def autodiscover(self):
"""
Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by
trying to import them.
"""
for app_name in [x for x in settings.INSTALLED_APPS]:
try:
import_module("%s.admin2" % app_name)
except ImportError as e:
if str(e).startswith("No module named") and 'admin2' in str(e):
continue
raise e
def get_admin_by_name(self, name):
"""
Returns the admin instance that was registered with the passed in
name.
"""
for object_admin in self.registry.values():
if object_admin.name == name:
return object_admin
raise ValueError(
u'No object admin found with name {}'.format(repr(name)))
def get_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
'login_view': self.login_view,
}
def get_app_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_api_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
    def get_urls(self):
        """Build this admin site's urlpatterns.

        Static views come first (dashboard, password change, logout,
        per-app index, API index); then one URL include per registered
        model admin, for both the HTML views and the ``api/v0`` endpoints.
        """
        urlpatterns = [
            url(regex=r'^$',
                view=self.index_view.as_view(**self.get_index_kwargs()),
                name='dashboard'
            ),
            url(regex=r'^auth/user/(?P<pk>\d+)/update/password/$',
                view=views.PasswordChangeView.as_view(),
                name='password_change'
            ),
            url(regex='^password_change_done/$',
                view=views.PasswordChangeDoneView.as_view(),
                name='password_change_done'
            ),
            url(regex='^logout/$',
                view=views.LogoutView.as_view(),
                name='logout'
            ),
            url(regex=r'^(?P<app_label>\w+)/$',
                view=self.app_index_view.as_view(
                    **self.get_app_index_kwargs()),
                name='app_index'
            ),
            url(regex=r'^api/v0/$',
                view=self.api_index_view.as_view(
                    **self.get_api_index_kwargs()),
                name='api_index'
            ),
        ]
        # Mount each registered admin at <app_label>/<model_name>/ and its
        # API counterpart under api/v0/<app_label>/<model_name>/.
        for model, model_admin in self.registry.items():
            model_options = utils.model_options(model)
            urlpatterns += [
                url('^{}/{}/'.format(
                    model_options.app_label,
                    model_options.object_name.lower()),
                    model_admin.urls),
                url('^api/v0/{}/{}/'.format(
                    model_options.app_label,
                    model_options.object_name.lower()),
                    model_admin.api_urls),
            ]
        return urlpatterns
@property
def urls(self):
# We set the application and instance namespace here
return self.get_urls(), self.name, self.name
|
__init__
|
Wrapper for parameter_importance to save the importance-object/ extract the results. We want to show the
top X most important parameter-fanova-plots.
Parameters
----------
runscontainer: RunsContainer
contains all important information about the configurator runs
marginal_threshold: float
parameter/s must be at least this important to be mentioned
|
import operator
import os
from collections import OrderedDict
from pandas import DataFrame
from cave.analyzer.parameter_importance.base_parameter_importance import BaseParameterImportance
class Fanova(BaseParameterImportance):
"""
fANOVA (functional analysis of variance) computes the fraction of the variance in the cost space explained by
changing a parameter by marginalizing over all other parameters, for each parameter (or for pairs of
parameters). Parameters with high importance scores will have a large impact on the performance. To this end, a
random forest is trained as an empirical performance model on the available empirical data from the available
runhistories.
"""
# MASKED: __init__ function (lines 19-35)
def get_name(self):
return 'fANOVA'
def postprocess(self, pimp, output_dir):
result = OrderedDict()
def parse_pairwise(p):
"""parse pimp's way of having pairwise parameters as key as str and return list of individuals"""
res = [tmp.strip('\' ') for tmp in p.strip('[]').split(',')]
return res
parameter_imp = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance.items()}
param_imp_std = {}
if hasattr(pimp.evaluator, 'evaluated_parameter_importance_uncertainty'):
param_imp_std = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance_uncertainty.items()}
for k in parameter_imp.keys():
self.logger.debug("fanova-importance for %s: mean (over trees): %f, std: %s", k, parameter_imp[k],
str(param_imp_std[k]) if param_imp_std else 'N/A')
# Split single and pairwise (pairwise are string: "['p1','p2']")
single_imp = {k: v for k, v in parameter_imp.items() if not k.startswith('[') and v > self.marginal_threshold}
pairwise_imp = {k: v for k, v in parameter_imp.items() if k.startswith('[') and v > self.marginal_threshold}
# Set internal parameter importance for further analysis (such as parallel coordinates)
self.fanova_single_importance = single_imp
self.fanova_pairwise_importance = single_imp
# Dicts to lists of tuples, sorted descending after importance
single_imp = OrderedDict(sorted(single_imp.items(), key=operator.itemgetter(1), reverse=True))
pairwise_imp = OrderedDict(sorted(pairwise_imp.items(), key=operator.itemgetter(1), reverse=True))
# Create table
table = []
if len(single_imp) > 0:
table.extend([(20*"-"+" Single importance: "+20*"-", 20*"-")])
for k, v in single_imp.items():
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((k, value))
if len(pairwise_imp) > 0:
table.extend([(20*"-"+" Pairwise importance: "+20*"-", 20*"-")])
for k, v in pairwise_imp.items():
name = ' & '.join(parse_pairwise(k))
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((name, value))
keys, fanova_table = [k[0] for k in table], [k[1:] for k in table]
df = DataFrame(data=fanova_table, index=keys)
result['Importance'] = {'table': df.to_html(escape=False, header=False, index=True, justify='left')}
# Get plot-paths
result['Marginals'] = {p: {'figure': os.path.join(output_dir, "fanova", p + '.png')} for p in single_imp.keys()}
# Right now no way to access paths of the plots -> file issue
pairwise_plots = {" & ".join(parse_pairwise(p)):
os.path.join(output_dir, 'fanova', '_'.join(parse_pairwise(p)) + '.png')
for p in pairwise_imp.keys()}
result['Pairwise Marginals'] = {p: {'figure': path}
for p, path in pairwise_plots.items() if os.path.exists(path)}
return result
def get_jupyter(self):
from IPython.core.display import HTML, Image, display
for b, result in self.result.items():
error = self.result[b]['else'] if 'else' in self.result[b] else None
if error:
display(HTML(error))
else:
# Show table
display(HTML(self.result[b]["Importance"]["table"]))
# Show plots
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Marginals'].values()]))
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Pairwise Marginals'].values()]))
# While working for a prettier solution, this might be an option:
# display(HTML(figure_to_html([d["figure"] for d in self.result[b]['Marginals'].values()] +
# [d["figure"] for d in self.result[b]['Pairwise Marginals'].values()],
# max_in_a_row=3, true_break_between_rows=True)))
|
    def __init__(self,
                 runscontainer,
                 marginal_threshold=0.05):
        """Wrapper for parameter_importance to save the importance-object/ extract the results. We want to show the
        top X most important parameter-fanova-plots.
        Parameters
        ----------
        runscontainer: RunsContainer
            contains all important information about the configurator runs
        marginal_threshold: float
            parameter/s must be at least this important to be mentioned
        """
        super().__init__(runscontainer)
        self.marginal_threshold = marginal_threshold
        # Runs the fanova evaluation immediately on construction.
        self.parameter_importance("fanova")
| 19
| 35
|
import operator
import os
from collections import OrderedDict
from pandas import DataFrame
from cave.analyzer.parameter_importance.base_parameter_importance import BaseParameterImportance
class Fanova(BaseParameterImportance):
"""
fANOVA (functional analysis of variance) computes the fraction of the variance in the cost space explained by
changing a parameter by marginalizing over all other parameters, for each parameter (or for pairs of
parameters). Parameters with high importance scores will have a large impact on the performance. To this end, a
random forest is trained as an empirical performance model on the available empirical data from the available
runhistories.
"""
def __init__(self,
runscontainer,
marginal_threshold=0.05):
"""Wrapper for parameter_importance to save the importance-object/ extract the results. We want to show the
top X most important parameter-fanova-plots.
Parameters
----------
runscontainer: RunsContainer
contains all important information about the configurator runs
marginal_threshold: float
parameter/s must be at least this important to be mentioned
"""
super().__init__(runscontainer)
self.marginal_threshold = marginal_threshold
self.parameter_importance("fanova")
def get_name(self):
return 'fANOVA'
def postprocess(self, pimp, output_dir):
result = OrderedDict()
def parse_pairwise(p):
"""parse pimp's way of having pairwise parameters as key as str and return list of individuals"""
res = [tmp.strip('\' ') for tmp in p.strip('[]').split(',')]
return res
parameter_imp = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance.items()}
param_imp_std = {}
if hasattr(pimp.evaluator, 'evaluated_parameter_importance_uncertainty'):
param_imp_std = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance_uncertainty.items()}
for k in parameter_imp.keys():
self.logger.debug("fanova-importance for %s: mean (over trees): %f, std: %s", k, parameter_imp[k],
str(param_imp_std[k]) if param_imp_std else 'N/A')
# Split single and pairwise (pairwise are string: "['p1','p2']")
single_imp = {k: v for k, v in parameter_imp.items() if not k.startswith('[') and v > self.marginal_threshold}
pairwise_imp = {k: v for k, v in parameter_imp.items() if k.startswith('[') and v > self.marginal_threshold}
# Set internal parameter importance for further analysis (such as parallel coordinates)
self.fanova_single_importance = single_imp
self.fanova_pairwise_importance = single_imp
# Dicts to lists of tuples, sorted descending after importance
single_imp = OrderedDict(sorted(single_imp.items(), key=operator.itemgetter(1), reverse=True))
pairwise_imp = OrderedDict(sorted(pairwise_imp.items(), key=operator.itemgetter(1), reverse=True))
# Create table
table = []
if len(single_imp) > 0:
table.extend([(20*"-"+" Single importance: "+20*"-", 20*"-")])
for k, v in single_imp.items():
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((k, value))
if len(pairwise_imp) > 0:
table.extend([(20*"-"+" Pairwise importance: "+20*"-", 20*"-")])
for k, v in pairwise_imp.items():
name = ' & '.join(parse_pairwise(k))
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((name, value))
keys, fanova_table = [k[0] for k in table], [k[1:] for k in table]
df = DataFrame(data=fanova_table, index=keys)
result['Importance'] = {'table': df.to_html(escape=False, header=False, index=True, justify='left')}
# Get plot-paths
result['Marginals'] = {p: {'figure': os.path.join(output_dir, "fanova", p + '.png')} for p in single_imp.keys()}
# Right now no way to access paths of the plots -> file issue
pairwise_plots = {" & ".join(parse_pairwise(p)):
os.path.join(output_dir, 'fanova', '_'.join(parse_pairwise(p)) + '.png')
for p in pairwise_imp.keys()}
result['Pairwise Marginals'] = {p: {'figure': path}
for p, path in pairwise_plots.items() if os.path.exists(path)}
return result
def get_jupyter(self):
from IPython.core.display import HTML, Image, display
for b, result in self.result.items():
error = self.result[b]['else'] if 'else' in self.result[b] else None
if error:
display(HTML(error))
else:
# Show table
display(HTML(self.result[b]["Importance"]["table"]))
# Show plots
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Marginals'].values()]))
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Pairwise Marginals'].values()]))
# While working for a prettier solution, this might be an option:
# display(HTML(figure_to_html([d["figure"] for d in self.result[b]['Marginals'].values()] +
# [d["figure"] for d in self.result[b]['Pairwise Marginals'].values()],
# max_in_a_row=3, true_break_between_rows=True)))
|
get_min_max
|
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
|
# MASKED: get_min_max function (lines 1-28)
# Example Test Case of Ten Integers
import random
# Test case 1: random int array
l = [i for i in range(0, 10)] # a list containing 0 - 9
print(f"Test case 1 - random list of int: {l}")
random.shuffle(l)
# Should print "Pass" as the result should be (0, 9)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Test case 2: empty array
print(f"Test case 2 - empty array")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max([])) else "Fail")
# Test case 3: array with single item
print(f"Test case 3 - array with single item")
# Should print "Pass" as the result should be (1, 1)
print ("Pass" if ((1, 1) == get_min_max([1])) else "Fail")
# Test case 4: non array input
print(f"Test case 4 - non array input")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max(10)) else "Fail")
|
def get_min_max(ints):
    """
    Return a tuple(min, max) out of list of unsorted integers.
    Args:
        ints(list): list of integers containing one or more integers
    """
    # Anything that is not a list cannot be scanned.
    if not isinstance(ints, list):
        return None, None
    # Both extremes stay None for an empty list.
    min_value = None
    max_value = None
    first = True
    for value in ints:
        if first:
            # Seed both extremes with the first element.
            min_value = value
            max_value = value
            first = False
        if value < min_value:
            min_value = value
        elif value > max_value:
            max_value = value
    return min_value, max_value
| 1
| 28
|
def get_min_max(ints):
    """
    Return a tuple(min, max) out of list of unsorted integers.
    Args:
        ints(list): list of integers containing one or more integers
    """
    # Anything that is not a list cannot be scanned.
    if not isinstance(ints, list):
        return None, None
    # Both extremes stay None for an empty list.
    min_value = None
    max_value = None
    first = True
    for value in ints:
        if first:
            # Seed both extremes with the first element.
            min_value = value
            max_value = value
            first = False
        if value < min_value:
            min_value = value
        elif value > max_value:
            max_value = value
    return min_value, max_value
# Example Test Case of Ten Integers
import random
# Test case 1: random int array
l = [i for i in range(0, 10)] # a list containing 0 - 9
print(f"Test case 1 - random list of int: {l}")
random.shuffle(l)
# Should print "Pass" as the result should be (0, 9)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Test case 2: empty array
print(f"Test case 2 - empty array")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max([])) else "Fail")
# Test case 3: array with single item
print(f"Test case 3 - array with single item")
# Should print "Pass" as the result should be (1, 1)
print ("Pass" if ((1, 1) == get_min_max([1])) else "Fail")
# Test case 4: non array input
print(f"Test case 4 - non array input")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max(10)) else "Fail")
|
run_task
|
Runs a named CumulusCI task for the current project with optional
support for overriding task options via kwargs.
Examples:
| =Keyword= | =task_name= | =task_options= | =comment= |
| Run Task | deploy | | Run deploy with standard options |
| Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
|
import logging
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from simple_salesforce import Salesforce
from cumulusci.cli.config import CliRuntime
from cumulusci.core.config import TaskConfig
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tasks import CURRENT_TASK
from cumulusci.core.utils import import_global
from cumulusci.robotframework.utils import set_pdb_trace
from cumulusci.tasks.robotframework.robotframework import Robot
class CumulusCI(object):
""" Library for accessing CumulusCI for the local git project
This library allows Robot Framework tests to access credentials to a
Salesforce org created by CumulusCI, including Scratch Orgs. It also
exposes the core logic of CumulusCI including interactions with the
Salesforce API's and project specific configuration including custom
and customized tasks and flows.
Initialization requires a single argument, the org name for the target
CumulusCI org. If running your tests via cci's robot task (recommended),
you can initialize the library in your tests taking advantage of the
variable set by the robot task:
| ``*** Settings ***``
|
| Library cumulusci.robotframework.CumulusCI ${ORG}
"""
ROBOT_LIBRARY_SCOPE = "GLOBAL"
def __init__(self, org_name=None):
if not org_name:
org_name = "dev"
self.org_name = org_name
self._project_config = None
self._org = None
self._sf = None
self._tooling = None
# Turn off info logging of all http requests
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARN
)
@property
def project_config(self):
if self._project_config is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
# If CumulusCI is running a task, use that task's config
return CURRENT_TASK.stack[0].project_config
else:
logger.console("Initializing CumulusCI config\n")
self._project_config = CliRuntime().project_config
return self._project_config
def set_project_config(self, project_config):
logger.console("\n")
self._project_config = project_config
@property
def keychain(self):
return self.project_config.keychain
@property
def org(self):
if self._org is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
# If CumulusCI is running a task, use that task's org
return CURRENT_TASK.stack[0].org_config
else:
self._org = self.keychain.get_org(self.org_name)
return self._org
@property
def sf(self):
if self._sf is None:
self._sf = self._init_api()
return self._sf
@property
def tooling(self):
if self._tooling is None:
self._tooling = self._init_api("tooling/")
return self._tooling
def set_login_url(self):
""" Sets the LOGIN_URL variable in the suite scope which will
automatically log into the target Salesforce org.
Typically, this is run during Suite Setup
"""
BuiltIn().set_suite_variable("${LOGIN_URL}", self.org.start_url)
def get_org_info(self):
""" Returns a dictionary of the org information for the current target
Salesforce org
"""
return self.org.config
def login_url(self, org=None):
""" Returns the login url which will automatically log into the target
Salesforce org. By default, the org_name passed to the library
constructor is used but this can be overridden with the org option
to log into a different org.
"""
if org is None:
org = self.org
else:
org = self.keychain.get_org(org)
return org.start_url
def get_namespace_prefix(self, package=None):
""" Returns the namespace prefix (including __) for the specified package name.
(Defaults to project__package__name_managed from the current project config.)
Returns an empty string if the package is not installed as a managed package.
"""
result = ""
if package is None:
package = self.project_config.project__package__name_managed
packages = self.tooling.query(
"SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name "
"FROM InstalledSubscriberPackage"
)
match = [
p for p in packages["records"] if p["SubscriberPackage"]["Name"] == package
]
if match:
result = match[0]["SubscriberPackage"]["NamespacePrefix"] + "__"
return result
# MASKED: run_task function (lines 137-150)
def run_task_class(self, class_path, **options):
""" Runs a CumulusCI task class with task options via kwargs.
Use this keyword to run logic from CumulusCI tasks which have not
been configured in the project's cumulusci.yml file. This is
most useful in cases where a test needs to use task logic for
logic unique to the test and thus not worth making into a named
task for the project
Examples:
| =Keyword= | =task_class= | =task_options= |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
"""
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, TaskConfig())
return self._run_task(task_class, task_config)
def _init_api(self, base_url=None):
api_version = self.project_config.project__package__api_version
rv = Salesforce(
instance=self.org.instance_url.replace("https://", ""),
session_id=self.org.access_token,
version=api_version,
)
if base_url is not None:
rv.base_url += base_url
return rv
def _init_task(self, class_path, options, task_config):
task_class = import_global(class_path)
task_config = self._parse_task_options(options, task_class, task_config)
return task_class, task_config
def _parse_task_options(self, options, task_class, task_config):
if "options" not in task_config.config:
task_config.config["options"] = {}
# Parse options and add to task config
if options:
for name, value in options.items():
# Validate the option
if name not in task_class.task_options:
raise TaskOptionsError(
'Option "{}" is not available for task {}'.format(
name, task_class
)
)
# Override the option in the task config
task_config.config["options"][name] = value
return task_config
def _run_task(self, task_class, task_config):
task = task_class(self.project_config, task_config, org_config=self.org)
task()
return task.return_values
def debug(self):
"""Pauses execution and enters the Python debugger."""
set_pdb_trace()
|
def run_task(self, task_name, **options):
""" Runs a named CumulusCI task for the current project with optional
support for overriding task options via kwargs.
Examples:
| =Keyword= | =task_name= | =task_options= | =comment= |
| Run Task | deploy | | Run deploy with standard options |
| Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
"""
task_config = self.project_config.get_task(task_name)
class_path = task_config.class_path
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, task_config)
return self._run_task(task_class, task_config)
| 137
| 150
|
import logging
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from simple_salesforce import Salesforce
from cumulusci.cli.config import CliRuntime
from cumulusci.core.config import TaskConfig
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tasks import CURRENT_TASK
from cumulusci.core.utils import import_global
from cumulusci.robotframework.utils import set_pdb_trace
from cumulusci.tasks.robotframework.robotframework import Robot
class CumulusCI(object):
    """ Library for accessing CumulusCI for the local git project
    This library allows Robot Framework tests to access credentials to a
    Salesforce org created by CumulusCI, including Scratch Orgs. It also
    exposes the core logic of CumulusCI including interactions with the
    Salesforce API's and project specific configuration including custom
    and customized tasks and flows.
    Initialization requires a single argument, the org name for the target
    CumulusCI org. If running your tests via cci's robot task (recommended),
    you can initialize the library in your tests taking advantage of the
    variable set by the robot task:
    | ``*** Settings ***``
    |
    | Library cumulusci.robotframework.CumulusCI ${ORG}
    """
    # One shared library instance across the whole Robot Framework run.
    ROBOT_LIBRARY_SCOPE = "GLOBAL"
    def __init__(self, org_name=None):
        # Fall back to the "dev" org when no org name is given.
        if not org_name:
            org_name = "dev"
        self.org_name = org_name
        # Lazily-created handles; populated by the properties below.
        self._project_config = None
        self._org = None
        self._sf = None
        self._tooling = None
        # Turn off info logging of all http requests
        logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
            logging.WARN
        )
    @property
    def project_config(self):
        # Lazily resolve the project config; a running Robot task's config
        # takes precedence and is NOT cached on this instance.
        if self._project_config is None:
            if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
                # If CumulusCI is running a task, use that task's config
                return CURRENT_TASK.stack[0].project_config
            else:
                logger.console("Initializing CumulusCI config\n")
                self._project_config = CliRuntime().project_config
        return self._project_config
    def set_project_config(self, project_config):
        # Allow callers to inject a pre-built project config.
        logger.console("\n")
        self._project_config = project_config
    @property
    def keychain(self):
        # Keychain holding org credentials for the project.
        return self.project_config.keychain
    @property
    def org(self):
        # Lazily resolve the org config, mirroring project_config above:
        # a running Robot task's org wins and is not cached.
        if self._org is None:
            if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
                # If CumulusCI is running a task, use that task's org
                return CURRENT_TASK.stack[0].org_config
            else:
                self._org = self.keychain.get_org(self.org_name)
        return self._org
    @property
    def sf(self):
        # Cached simple_salesforce client for the REST API.
        if self._sf is None:
            self._sf = self._init_api()
        return self._sf
    @property
    def tooling(self):
        # Cached simple_salesforce client pointed at the Tooling API.
        if self._tooling is None:
            self._tooling = self._init_api("tooling/")
        return self._tooling
    def set_login_url(self):
        """ Sets the LOGIN_URL variable in the suite scope which will
        automatically log into the target Salesforce org.
        Typically, this is run during Suite Setup
        """
        BuiltIn().set_suite_variable("${LOGIN_URL}", self.org.start_url)
    def get_org_info(self):
        """ Returns a dictionary of the org information for the current target
        Salesforce org
        """
        return self.org.config
    def login_url(self, org=None):
        """ Returns the login url which will automatically log into the target
        Salesforce org. By default, the org_name passed to the library
        constructor is used but this can be overridden with the org option
        to log into a different org.
        """
        if org is None:
            org = self.org
        else:
            org = self.keychain.get_org(org)
        return org.start_url
    def get_namespace_prefix(self, package=None):
        """ Returns the namespace prefix (including __) for the specified package name.
        (Defaults to project__package__name_managed from the current project config.)
        Returns an empty string if the package is not installed as a managed package.
        """
        result = ""
        if package is None:
            package = self.project_config.project__package__name_managed
        # Query installed packages through the Tooling API, match by name.
        packages = self.tooling.query(
            "SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name "
            "FROM InstalledSubscriberPackage"
        )
        match = [
            p for p in packages["records"] if p["SubscriberPackage"]["Name"] == package
        ]
        if match:
            result = match[0]["SubscriberPackage"]["NamespacePrefix"] + "__"
        return result
    def run_task(self, task_name, **options):
        """ Runs a named CumulusCI task for the current project with optional
        support for overriding task options via kwargs.
        Examples:
        | =Keyword= | =task_name= | =task_options= | =comment= |
        | Run Task  | deploy      |                | Run deploy with standard options |
        | Run Task  | deploy      | path=path/to/some/metadata | Run deploy with custom path |
        """
        task_config = self.project_config.get_task(task_name)
        class_path = task_config.class_path
        logger.console("\n")
        task_class, task_config = self._init_task(class_path, options, task_config)
        return self._run_task(task_class, task_config)
    def run_task_class(self, class_path, **options):
        """ Runs a CumulusCI task class with task options via kwargs.
        Use this keyword to run logic from CumulusCI tasks which have not
        been configured in the project's cumulusci.yml file. This is
        most useful in cases where a test needs to use task logic for
        logic unique to the test and thus not worth making into a named
        task for the project
        Examples:
        | =Keyword=      | =task_class=                     | =task_options= |
        | Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
        """
        logger.console("\n")
        task_class, task_config = self._init_task(class_path, options, TaskConfig())
        return self._run_task(task_class, task_config)
    def _init_api(self, base_url=None):
        # Build a simple_salesforce client for the current org; base_url
        # (e.g. "tooling/") is appended to target a sub-API.
        api_version = self.project_config.project__package__api_version
        rv = Salesforce(
            instance=self.org.instance_url.replace("https://", ""),
            session_id=self.org.access_token,
            version=api_version,
        )
        if base_url is not None:
            rv.base_url += base_url
        return rv
    def _init_task(self, class_path, options, task_config):
        # Import the task class and merge keyword options into its config.
        task_class = import_global(class_path)
        task_config = self._parse_task_options(options, task_class, task_config)
        return task_class, task_config
    def _parse_task_options(self, options, task_class, task_config):
        # Validate each override against the task's declared options and
        # write it into the task config.
        if "options" not in task_config.config:
            task_config.config["options"] = {}
        # Parse options and add to task config
        if options:
            for name, value in options.items():
                # Validate the option
                if name not in task_class.task_options:
                    raise TaskOptionsError(
                        'Option "{}" is not available for task {}'.format(
                            name, task_class
                        )
                    )
                # Override the option in the task config
                task_config.config["options"][name] = value
        return task_config
    def _run_task(self, task_class, task_config):
        # Instantiate, execute, and return the task's return_values.
        task = task_class(self.project_config, task_config, org_config=self.org)
        task()
        return task.return_values
    def debug(self):
        """Pauses execution and enters the Python debugger."""
        set_pdb_trace()
|
__init__
|
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobDestinationS3Args']):
"""
:param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
# MASKED: __init__ function (lines 47-56)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
def __init__(__self__, *,
reason: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
def __init__(__self__, *,
s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3s")
@s3s.setter
def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
def __init__(__self__, *,
bucket: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
"""
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobSourceS3Args']):
"""
:param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of your source image in your version enabled S3 bucket.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
def __init__(__self__, *,
revocation_effective_from: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revocation_effective_from")
@revocation_effective_from.setter
def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revocation_effective_from", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[int]):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
|
def __init__(__self__, *,
bucket: pulumi.Input[str],
prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
| 47
| 56
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobDestinationS3Args']):
"""
:param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
def __init__(__self__, *,
reason: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
def __init__(__self__, *,
s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3s")
@s3s.setter
def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
def __init__(__self__, *,
bucket: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
"""
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobSourceS3Args']):
"""
:param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of your source image in your version enabled S3 bucket.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
def __init__(__self__, *,
revocation_effective_from: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revocation_effective_from")
@revocation_effective_from.setter
def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revocation_effective_from", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[int]):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
|
__init__
|
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobDestinationS3Args']):
"""
:param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
def __init__(__self__, *,
reason: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
def __init__(__self__, *,
s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3s")
@s3s.setter
def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
# MASKED: __init__ function (lines 149-159)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobSourceS3Args']):
"""
:param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of your source image in your version enabled S3 bucket.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
def __init__(__self__, *,
revocation_effective_from: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revocation_effective_from")
@revocation_effective_from.setter
def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revocation_effective_from", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[int]):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
|
def __init__(__self__, *,
bucket: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
"""
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
| 149
| 159
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobDestinationS3Args']):
"""
:param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
def __init__(__self__, *,
reason: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
def __init__(__self__, *,
s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3s")
@s3s.setter
def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
def __init__(__self__, *,
bucket: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
"""
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobSourceS3Args']):
"""
:param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of your source image in your version enabled S3 bucket.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
def __init__(__self__, *,
revocation_effective_from: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revocation_effective_from")
@revocation_effective_from.setter
def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revocation_effective_from", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[int]):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
|
__init__
|
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
    """Input args wrapping a signing job's destination; S3 is the only supported target."""

    def __init__(__self__, *,
                 s3: pulumi.Input['SigningJobDestinationS3Args']):
        """
        :param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
        """
        pulumi.set(__self__, "s3", s3)

    @property
    @pulumi.getter
    def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
        """
        A configuration block describing the S3 Destination object: See S3 Destination below for details.
        """
        return pulumi.get(self, "s3")

    @s3.setter
    def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
        pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
    """S3 destination for a signing job: required bucket plus optional key prefix."""

    def __init__(__self__, *,
                 bucket: pulumi.Input[str],
                 prefix: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] bucket: Name of the S3 bucket.
        :param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed object keys to begin with the specified prefix.
        """
        pulumi.set(__self__, "bucket", bucket)
        # Optional field: only set when provided so the engine sees it as absent.
        if prefix is not None:
            pulumi.set(__self__, "prefix", prefix)

    @property
    @pulumi.getter
    def bucket(self) -> pulumi.Input[str]:
        """
        Name of the S3 bucket.
        """
        return pulumi.get(self, "bucket")

    @bucket.setter
    def bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "bucket", value)

    @property
    @pulumi.getter
    def prefix(self) -> Optional[pulumi.Input[str]]:
        """
        An Amazon S3 object key prefix that you can use to limit signed object keys to begin with the specified prefix.
        """
        return pulumi.get(self, "prefix")

    @prefix.setter
    def prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
    """Revocation metadata for a signing job (reason, timestamp, actor).

    All fields are optional; presumably these are provider-populated outputs
    rather than user inputs — verify against the AWS Signer provider schema.
    """

    def __init__(__self__, *,
                 reason: Optional[pulumi.Input[str]] = None,
                 revoked_at: Optional[pulumi.Input[str]] = None,
                 revoked_by: Optional[pulumi.Input[str]] = None):
        # Each optional field is set only when supplied, so unset fields
        # remain absent from the resource inputs.
        if reason is not None:
            pulumi.set(__self__, "reason", reason)
        if revoked_at is not None:
            pulumi.set(__self__, "revoked_at", revoked_at)
        if revoked_by is not None:
            pulumi.set(__self__, "revoked_by", revoked_by)

    @property
    @pulumi.getter
    def reason(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "reason")

    @reason.setter
    def reason(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reason", value)

    @property
    @pulumi.getter(name="revokedAt")
    def revoked_at(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "revoked_at")

    @revoked_at.setter
    def revoked_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "revoked_at", value)

    @property
    @pulumi.getter(name="revokedBy")
    def revoked_by(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "revoked_by")

    @revoked_by.setter
    def revoked_by(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
    """Wrapper holding the (optional) list of signed-object S3 locations.

    NOTE(review): the generated docstrings below describe an "S3 Destination"
    block — likely copied by the generator from the destination type; confirm
    against the provider documentation.
    """

    def __init__(__self__, *,
                 s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.
        """
        if s3s is not None:
            pulumi.set(__self__, "s3s", s3s)

    @property
    @pulumi.getter
    def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
        """
        A configuration block describing the S3 Destination object: See S3 Destination below for details.
        """
        return pulumi.get(self, "s3s")

    @s3s.setter
    def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
        pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
    """S3 location of a signed object; both fields optional (provider-populated)."""

    def __init__(__self__, *,
                 bucket: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] bucket: Name of the S3 bucket.
        :param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
        """
        # Optional fields are set only when provided.
        if bucket is not None:
            pulumi.set(__self__, "bucket", bucket)
        if key is not None:
            pulumi.set(__self__, "key", key)

    @property
    @pulumi.getter
    def bucket(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the S3 bucket.
        """
        return pulumi.get(self, "bucket")

    @bucket.setter
    def bucket(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bucket", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        Key name of the object that contains your unsigned code.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
    """Input args wrapping a signing job's source; S3 is the only supported source."""

    def __init__(__self__, *,
                 s3: pulumi.Input['SigningJobSourceS3Args']):
        """
        :param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
        """
        pulumi.set(__self__, "s3", s3)

    @property
    @pulumi.getter
    def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
        """
        A configuration block describing the S3 Destination object: See S3 Destination below for details.
        """
        return pulumi.get(self, "s3")

    @s3.setter
    def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
        pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
# MASKED: __init__ function (lines 210-221)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of your source image in your version enabled S3 bucket.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
    """Revocation metadata for a signing profile.

    All fields are optional; presumably provider-populated — verify against
    the AWS Signer provider schema.
    """

    def __init__(__self__, *,
                 revocation_effective_from: Optional[pulumi.Input[str]] = None,
                 revoked_at: Optional[pulumi.Input[str]] = None,
                 revoked_by: Optional[pulumi.Input[str]] = None):
        # Optional fields are set only when supplied.
        if revocation_effective_from is not None:
            pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
        if revoked_at is not None:
            pulumi.set(__self__, "revoked_at", revoked_at)
        if revoked_by is not None:
            pulumi.set(__self__, "revoked_by", revoked_by)

    @property
    @pulumi.getter(name="revocationEffectiveFrom")
    def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "revocation_effective_from")

    @revocation_effective_from.setter
    def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "revocation_effective_from", value)

    @property
    @pulumi.getter(name="revokedAt")
    def revoked_at(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "revoked_at")

    @revoked_at.setter
    def revoked_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "revoked_at", value)

    @property
    @pulumi.getter(name="revokedBy")
    def revoked_by(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "revoked_by")

    @revoked_by.setter
    def revoked_by(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
    """Validity period of signatures produced under a signing profile.

    `type` is the time unit and `value` the number of those units; the set of
    accepted unit strings is defined by AWS Signer — confirm in provider docs.
    """

    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 value: pulumi.Input[int]):
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[int]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[int]):
        pulumi.set(self, "value", value)
|
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
| 210
| 221
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobDestinationS3Args']):
"""
:param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
def __init__(__self__, *,
reason: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
def __init__(__self__, *,
s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3s")
@s3s.setter
def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
def __init__(__self__, *,
bucket: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
"""
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobSourceS3Args']):
"""
:param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of your source image in your version enabled S3 bucket.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
def __init__(__self__, *,
revocation_effective_from: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revocation_effective_from")
@revocation_effective_from.setter
def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revocation_effective_from", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[int]):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
|
get_network_interface_tap_configuration
|
Use this data source to access information about an existing resource.
:param str network_interface_name: The name of the network interface.
:param str resource_group_name: The name of the resource group.
:param str tap_configuration_name: The name of the tap configuration.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkInterfaceTapConfigurationResult',
'AwaitableGetNetworkInterfaceTapConfigurationResult',
'get_network_interface_tap_configuration',
]
@pulumi.output_type
class GetNetworkInterfaceTapConfigurationResult:
    """
    Tap configuration in a Network Interface.
    """
    def __init__(__self__, etag=None, name=None, provisioning_state=None, type=None, virtual_network_tap=None):
        """Validate the raw invoke-result fields and store them as outputs.

        Each argument is type-checked so a malformed provider response fails
        fast with a clear TypeError instead of surfacing later.
        """
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if virtual_network_tap and not isinstance(virtual_network_tap, dict):
            raise TypeError("Expected argument 'virtual_network_tap' to be a dict")
        pulumi.set(__self__, "virtual_network_tap", virtual_network_tap)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the network interface tap configuration resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Sub Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualNetworkTap")
    def virtual_network_tap(self) -> Optional['outputs.VirtualNetworkTapResponse']:
        """
        The reference to the Virtual Network Tap resource.
        """
        return pulumi.get(self, "virtual_network_tap")
class AwaitableGetNetworkInterfaceTapConfigurationResult(GetNetworkInterfaceTapConfigurationResult):
    """Awaitable shim so the result can be used with `await` in async code.

    The generator's standard pattern: `__await__` never actually suspends
    (the `if False: yield self` only marks it as a generator) and returns a
    plain result copy immediately.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetNetworkInterfaceTapConfigurationResult(
            etag=self.etag,
            name=self.name,
            provisioning_state=self.provisioning_state,
            type=self.type,
            virtual_network_tap=self.virtual_network_tap)
# MASKED: get_network_interface_tap_configuration function (lines 94-120)
|
def get_network_interface_tap_configuration(network_interface_name: Optional[str] = None,
                                            resource_group_name: Optional[str] = None,
                                            tap_configuration_name: Optional[str] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInterfaceTapConfigurationResult:
    """
    Use this data source to access information about an existing resource.

    :param str network_interface_name: The name of the network interface.
    :param str resource_group_name: The name of the resource group.
    :param str tap_configuration_name: The name of the tap configuration.
    :param pulumi.InvokeOptions opts: Options controlling the invoke; a default
        with the SDK's version is created when omitted.
    """
    # Arguments are keyed by the provider's camelCase wire names.
    __args__ = dict()
    __args__['networkInterfaceName'] = network_interface_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['tapConfigurationName'] = tap_configuration_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the pinned 2020-03-01 API version.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getNetworkInterfaceTapConfiguration', __args__, opts=opts, typ=GetNetworkInterfaceTapConfigurationResult).value

    return AwaitableGetNetworkInterfaceTapConfigurationResult(
        etag=__ret__.etag,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type,
        virtual_network_tap=__ret__.virtual_network_tap)
| 94
| 120
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkInterfaceTapConfigurationResult',
'AwaitableGetNetworkInterfaceTapConfigurationResult',
'get_network_interface_tap_configuration',
]
@pulumi.output_type
class GetNetworkInterfaceTapConfigurationResult:
"""
Tap configuration in a Network Interface.
"""
def __init__(__self__, etag=None, name=None, provisioning_state=None, type=None, virtual_network_tap=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network_tap and not isinstance(virtual_network_tap, dict):
raise TypeError("Expected argument 'virtual_network_tap' to be a dict")
pulumi.set(__self__, "virtual_network_tap", virtual_network_tap)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the network interface tap configuration resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Sub Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkTap")
def virtual_network_tap(self) -> Optional['outputs.VirtualNetworkTapResponse']:
"""
The reference to the Virtual Network Tap resource.
"""
return pulumi.get(self, "virtual_network_tap")
class AwaitableGetNetworkInterfaceTapConfigurationResult(GetNetworkInterfaceTapConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkInterfaceTapConfigurationResult(
etag=self.etag,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type,
virtual_network_tap=self.virtual_network_tap)
def get_network_interface_tap_configuration(network_interface_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
tap_configuration_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInterfaceTapConfigurationResult:
"""
Use this data source to access information about an existing resource.
:param str network_interface_name: The name of the network interface.
:param str resource_group_name: The name of the resource group.
:param str tap_configuration_name: The name of the tap configuration.
"""
__args__ = dict()
__args__['networkInterfaceName'] = network_interface_name
__args__['resourceGroupName'] = resource_group_name
__args__['tapConfigurationName'] = tap_configuration_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getNetworkInterfaceTapConfiguration', __args__, opts=opts, typ=GetNetworkInterfaceTapConfigurationResult).value
return AwaitableGetNetworkInterfaceTapConfigurationResult(
etag=__ret__.etag,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type,
virtual_network_tap=__ret__.virtual_network_tap)
|
__init__
|
Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    """On reset, advance the env with a random number of no-op actions.

    Action 0 is assumed to be NOOP (asserted against the env's action
    meanings); this randomizes the initial state.
    """

    def __init__(self, env, noop_max=30):
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max          # upper bound on sampled no-ops
        self.override_num_noops = None    # set externally to force an exact count
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        num_noops = (self.override_num_noops
                     if self.override_num_noops is not None
                     else self.unwrapped.np_random.randint(1, self.noop_max + 1))  # pylint: disable=E1101
        assert num_noops > 0
        obs = None
        for _ in range(num_noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    """Press FIRE (and one more action) after reset, for games that stay
    frozen until the agent fires."""

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        meanings = env.unwrapped.get_action_meanings()
        assert meanings[1] == 'FIRE'
        assert len(meanings) >= 3

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)  # FIRE
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
# MASKED: __init__ function (lines 61-67)
def step(self, action):
    """Step the wrapped env, marking any life loss as episode termination."""
    obs, reward, done, info = self.env.step(action)
    self.was_real_done = done  # remember whether the game truly ended
    # Make loss of life terminal, but only while lives remain: Qbert can
    # report lives == 0 for a few frames, and we must not trigger an extra
    # reset before the env itself advertises done.
    remaining = self.env.unwrapped.ale.lives()
    if 0 < remaining < self.lives:
        done = True
    self.lives = remaining
    return obs, reward, done, info
def reset(self, **kwargs):
    """Reset fully only on true game over.

    On a life-loss "episode end" we instead take a single no-op step to
    advance past the terminal state, keeping all states reachable while
    hiding the life bookkeeping from the learner.
    """
    if self.was_real_done:
        obs = self.env.reset(**kwargs)
    else:
        obs, _, _, _ = self.env.step(0)  # no-op past terminal/lost-life state
    self.lives = self.env.unwrapped.ale.lives()
    return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action `skip` times; return the pixel-wise max of the
    last two raw frames (removes Atari sprite flicker)."""

    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        # Holds the two most recent raw observations for max-pooling.
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        """Repeat `action`, summing rewards and max-pooling the final frames."""
        total_reward = 0.0
        done = None
        for step_idx in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if step_idx == self._skip - 2:
                self._obs_buffer[0] = obs
            elif step_idx == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # The observation on the done=True frame doesn't matter downstream.
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
    """Clip rewards to their sign: {-1, 0, +1} (DQN-style reward clipping)."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Return the sign of `reward`."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.

        If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
        observation should be warped.

        :param env: environment to wrap; the warped observation space must be
            uint8 with 3 dims (asserted below).
        :param width: output frame width in pixels.
        :param height: output frame height in pixels.
        :param grayscale: if True, convert RGB to a single-channel frame.
        :param dict_space_key: key of the sub-observation to warp when the
            observation space is a dict; None warps the whole observation.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3

        # New (height, width, channels) uint8 box replacing the warped space.
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            # Replace only the selected sub-space inside the dict space.
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3

    def observation(self, obs):
        """Resize (and optionally grayscale) the selected frame in `obs`."""
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]

        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            # cvtColor drops the channel axis; restore a trailing channel dim.
            frame = np.expand_dims(frame, -1)

        if self._key is None:
            obs = frame
        else:
            # Shallow-copy the dict so the caller's original is not mutated.
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    """Stack the `k` most recent frames along the first axis.

    Observations are returned as LazyFrames, which avoids duplicating memory
    between overlapping stacks (see
    baselines.common.atari_wrappers.LazyFrames).
    """

    def __init__(self, env, k):
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)  # rolling window of raw frames
        shp = env.observation_space.shape
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=((shp[0] * k,) + shp[1:]),
            dtype=env.observation_space.dtype,
        )

    def reset(self):
        first_obs = self.env.reset()
        # Seed the window by repeating the initial observation k times.
        for _ in range(self.k):
            self.frames.append(first_obs)
        return self._get_ob()

    def step(self, action):
        next_obs, reward, done, info = self.env.step(action)
        self.frames.append(next_obs)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    """Convert uint8 observations in [0, 255] to float32 in [0.0, 1.0].

    Careful: this materializes the observation as a float array, which
    defeats the LazyFrames memory optimization — only use it together with
    small replay buffers.
    """

    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, observation):
        as_float = np.array(observation).astype(np.float32)
        return as_float / 255.0
class LazyFrames(object):
    """Memory-saving container for stacked observation frames.

    Keeps the per-step frames as a list so that frames shared between
    consecutive stacked observations are stored only once — a large saving
    for DQN-style replay buffers with ~1M entries.  The concatenated array
    is materialized lazily on first access and then cached.  Convert to a
    numpy array only right before feeding the model.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None

    def _force(self):
        # Materialize (and cache) the concatenated array on first use,
        # releasing the frame list afterwards.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=0)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        arr = self._force()
        return arr if dtype is None else arr.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

    def count(self):
        # Size of the last axis of the materialized array.
        stacked = self._force()
        return stacked.shape[stacked.ndim - 1]

    def frame(self, i):
        # Slice along the trailing axis.
        return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
    """Apply the standard pre-deepmind Atari wrappers (noop reset + frame skip).

    Requires a 'NoFrameskip' variant of the environment, since MaxAndSkipEnv
    does its own frame skipping.  ``max_episode_steps`` is not supported here
    and must be left as None (asserted below).
    """
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    assert max_episode_steps is None
    return env
class ImageToPyTorch(gym.ObservationWrapper):
    """
    Reorder image observations from HWC (height x width x channel) to the
    CHW (channel x height x width) layout expected by PyTorch conv layers.
    """
    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        old_shape = self.observation_space.shape
        # (H, W, C) -> (C, H, W)
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(old_shape[-1], old_shape[0], old_shape[1]),
            dtype=np.uint8,
        )
    def observation(self, observation):
        # Move the channel axis to the front.
        return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.

    Applies, in order: episodic-life termination, FIRE-on-reset (only if the
    action set has a FIRE action), 84x84 grayscale warping, optional [0,1]
    float scaling, optional sign-based reward clipping, HWC->CHW transpose,
    and optional 4-frame stacking.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    # Transpose before stacking so frames stack along the channel axis.
    env = ImageToPyTorch(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Double DQN Agent')
    # Common arguments
    # BUGFIX: the original used os.path.basename(__file__).rstrip(".py").
    # str.rstrip strips any trailing '.', 'p' or 'y' characters (it takes a
    # character set, not a suffix), so e.g. "dqn_happy.py" became "dqn_ha".
    # splitext removes exactly the extension.
    parser.add_argument('--exp-name', type=str, default=os.path.splitext(os.path.basename(__file__))[0],
                        help='the name of this experiment')
    parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
                        help='the id of the gym environment')
    parser.add_argument('--learning-rate', type=float, default=1e-4,
                        help='the learning rate of the optimizer')
    parser.add_argument('--seed', type=int, default=2,
                        help='seed of the experiment')
    parser.add_argument('--total-timesteps', type=int, default=10000000,
                        help='total timesteps of the experiments')
    parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
                        help='if toggled, `torch.backends.cudnn.deterministic=False`')
    parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
                        help='if toggled, cuda will not be enabled by default')
    parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
                        help='run the script in production mode and use wandb to log outputs')
    parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
                        help='whether to capture videos of the agent performances (check out `videos` folder)')
    parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
                        help="the wandb's project name")
    parser.add_argument('--wandb-entity', type=str, default=None,
                        help="the entity (team) of wandb's project")
    # Algorithm specific arguments
    parser.add_argument('--buffer-size', type=int, default=1000000,
                        help='the replay memory buffer size')
    parser.add_argument('--gamma', type=float, default=0.99,
                        help='the discount factor gamma')
    parser.add_argument('--target-network-frequency', type=int, default=1000,
                        help="the timesteps it takes to update the target network")
    parser.add_argument('--max-grad-norm', type=float, default=0.5,
                        help='the maximum norm for the gradient clipping')
    parser.add_argument('--batch-size', type=int, default=32,
                        help="the batch size of sample from the reply memory")
    parser.add_argument('--start-e', type=float, default=1.,
                        help="the starting epsilon for exploration")
    parser.add_argument('--end-e', type=float, default=0.02,
                        help="the ending epsilon for exploration")
    parser.add_argument('--exploration-fraction', type=float, default=0.10,
                        help="the fraction of `total-timesteps` it takes from start-e to go end-e")
    parser.add_argument('--learning-starts', type=int, default=80000,
                        help="timestep to start learning")
    parser.add_argument('--train-frequency', type=int, default=4,
                        help="the frequency of training")
    args = parser.parse_args()
    # A seed of 0 (or unset) falls back to a time-based seed.
    if not args.seed:
        args.seed = int(time.time())
    class QValueVisualizationWrapper(gym.Wrapper):
        """Wrapper that renders a live bar chart of Q-values next to each frame.

        Used only when --capture-video is set: the recorded video shows the
        game frame side-by-side with the agent's current Q-value estimates.
        """
        def __init__(self, env):
            super().__init__(env)
            self.env.reset()
            # Cache the raw frame shape so the bar chart can be sized to match.
            self.image_shape = self.env.render(mode="rgb_array").shape
            # Placeholder until set_q_values() is first called.
            self.q_values = [[0.,0.,0.,0.]]
            # self.metadata['video.frames_per_second'] = 60
        def set_q_values(self, q_values):
            # Called from the training loop with the latest network output.
            self.q_values = q_values
        def render(self, mode="human"):
            if mode=="rgb_array":
                env_rgb_array = super().render(mode)
                # Build a bar plot the same pixel height as the game frame
                # (figsize is in inches at dpi=100, hence the /100).
                fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
                df = pd.DataFrame(np.array(self.q_values).T)
                sns.barplot(x=df.index, y=0, data=df, ax=ax)
                ax.set(xlabel='actions', ylabel='q-values')
                fig.canvas.draw()
                # Rasterize the figure into an RGBA array, then drop alpha.
                X = np.array(fig.canvas.renderer.buffer_rgba())
                Image.fromarray(X)
                # NOTE(review): the bare Image.fromarray(X) above discards its
                # result and looks like dead code — confirm and remove.
                rgb_image = np.array(Image.fromarray(X).convert('RGB'))
                plt.close(fig)
                q_value_rgb_array = rgb_image
                # Concatenate game frame and chart horizontally.
                return np.append(env_rgb_array, q_value_rgb_array, axis=1)
            else:
                # NOTE(review): this branch drops the parent's return value
                # (returns None) — presumably intentional for "human" mode,
                # but worth confirming.
                super().render(mode)
    # TRY NOT TO MODIFY: setup the environment
    experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
    writer = SummaryWriter(f"runs/{experiment_name}")
    # Log all hyperparameters as a markdown table in TensorBoard.
    writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
        '\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
    if args.prod_mode:
        # wandb is imported lazily so the script runs without it installed.
        import wandb
        wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
        writer = SummaryWriter(f"/tmp/{experiment_name}")
    # TRY NOT TO MODIFY: seeding
    device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
    env = gym.make(args.gym_id)
    env = wrap_atari(env)
    env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
    if args.capture_video:
        # Q-value overlay must wrap before Monitor so the chart is recorded.
        env = QValueVisualizationWrapper(env)
        env = Monitor(env, f'videos/{experiment_name}')
    env = wrap_deepmind(
        env,
        clip_rewards=True,
        frame_stack=True,
        scale=False,
    )
    # Seed every RNG in play: python, numpy, torch, and the environment.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic
    env.seed(args.seed)
    env.action_space.seed(args.seed)
    env.observation_space.seed(args.seed)
    # respect the default timelimit
    assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
# ALGO LOGIC: initialize agent here:
# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16
# apparently matters
class Linear0(nn.Linear):
def reset_parameters(self):
nn.init.constant_(self.weight, 0.0)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
    class QNetwork(nn.Module):
        """Nature-DQN convolutional Q-network.

        Maps a stack of ``frames`` 84x84 grayscale frames to one Q-value per
        action.  Input pixels are uint8; the leading Scale layer rescales
        them to [0, 1].
        """
        def __init__(self, frames=4):
            super(QNetwork, self).__init__()
            self.network = nn.Sequential(
                Scale(1/255),
                nn.Conv2d(frames, 32, 8, stride=4),
                nn.ReLU(),
                nn.Conv2d(32, 64, 4, stride=2),
                nn.ReLU(),
                nn.Conv2d(64, 64, 3, stride=1),
                nn.ReLU(),
                nn.Flatten(),
                # 3136 = 64 channels * 7 * 7 spatial, for an 84x84 input.
                nn.Linear(3136, 512),
                nn.ReLU(),
                # Zero-initialized output head; reads the action count from
                # the module-level `env`.
                Linear0(512, env.action_space.n)
            )
        def forward(self, x):
            # Accepts numpy arrays; converts to a float tensor on `device`.
            x = torch.Tensor(x).to(device)
            return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
    rb = ReplayBuffer(args.buffer_size)
    q_network = QNetwork().to(device)
    # Target network starts as an exact copy of the online network.
    target_network = QNetwork().to(device)
    target_network.load_state_dict(q_network.state_dict())
    optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
    loss_fn = nn.MSELoss()
    print(device.__repr__())
    print(q_network)
    # TRY NOT TO MODIFY: start the game
    obs = env.reset()
    episode_reward = 0
    for global_step in range(args.total_timesteps):
        # ALGO LOGIC: put action logic here
        # Epsilon-greedy exploration, annealed linearly over the first
        # exploration_fraction of training.
        epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
        obs = np.array(obs)
        # Add a batch dimension for the forward pass.
        logits = q_network.forward(obs.reshape((1,)+obs.shape))
        if args.capture_video:
            env.set_q_values(logits.tolist())
        if random.random() < epsilon:
            action = env.action_space.sample()
        else:
            action = torch.argmax(logits, dim=1).tolist()[0]
        # TRY NOT TO MODIFY: execute the game and log data.
        next_obs, reward, done, info = env.step(action)
        episode_reward += reward
        # TRY NOT TO MODIFY: record rewards for plotting purposes
        if 'episode' in info.keys():
            print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
            writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
            writer.add_scalar("charts/epsilon", epsilon, global_step)
        # ALGO LOGIC: training.
        rb.put((obs, action, reward, next_obs, done))
        if global_step > args.learning_starts and global_step % args.train_frequency == 0:
            s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
            with torch.no_grad():
                # target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
                # Double DQN: the online network selects the argmax action,
                # the target network evaluates it.
                current_value = q_network.forward(s_next_obses)
                target_value = target_network.forward(s_next_obses)
                target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
            # Bootstrapped TD target; (1 - done) zeroes the bootstrap on
            # terminal transitions.
            td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
            old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
            loss = loss_fn(td_target, old_val)
            writer.add_scalar("losses/td_loss", loss, global_step)
            # optimize the model
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
            optimizer.step()
            # update the target network
            if global_step % args.target_network_frequency == 0:
                target_network.load_state_dict(q_network.state_dict())
        # TRY NOT TO MODIFY: CRUCIAL step easy to overlook
        obs = next_obs
        if done:
            # important to note that because `EpisodicLifeEnv` wrapper is applied,
            # the real episode reward is actually the sum of episode reward of 5 lives
            # which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics
            obs, episode_reward = env.reset(), 0
    env.close()
    writer.close()
|
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
| 61
| 67
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, forces a fixed number of no-ops (useful for evaluation).
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            # Uses the environment's own RNG so seeding stays reproducible.
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                # Episode ended during the no-ops; restart and keep going.
                obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        # Plain passthrough: this wrapper only changes reset behavior.
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        # Requires action 1 to be FIRE and at least 3 actions total.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        # Press FIRE (action 1), then action 2, re-resetting if either
        # step somehow terminates the episode.
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        # NOTE(review): when the second step terminates, the obs returned
        # here predates the final reset (matches upstream baselines) —
        # confirm this is acceptable before changing.
        return obs
    def step(self, ac):
        # Plain passthrough: this wrapper only changes reset behavior.
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        # Tracks whether the underlying game actually ended (vs. life lost).
        self.was_real_done = True
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Keep only the last two raw frames; max-pooling them removes
            # Atari sprite flicker.
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
    def reset(self, **kwargs):
        # Plain passthrough: the buffer only matters inside step().
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
    """Memory-saving container for stacked observation frames.

    Keeps the per-step frames as a list so that frames shared between
    consecutive stacked observations are stored only once — a large saving
    for DQN-style replay buffers with ~1M entries.  The concatenated array
    is materialized lazily on first access and then cached.  Convert to a
    numpy array only right before feeding the model.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None

    def _force(self):
        # Materialize (and cache) the concatenated array on first use,
        # releasing the frame list afterwards.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=0)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        arr = self._force()
        return arr if dtype is None else arr.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

    def count(self):
        # Size of the last axis of the materialized array.
        stacked = self._force()
        return stacked.shape[stacked.ndim - 1]

    def frame(self, i):
        # Slice along the trailing axis.
        return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
Image shape to channels x weight x height
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = ImageToPyTorch(env)
if frame_stack:
env = FrameStack(env, 4)
return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Double DQN Agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='weather to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
class QValueVisualizationWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.env.reset()
self.image_shape = self.env.render(mode="rgb_array").shape
self.q_values = [[0.,0.,0.,0.]]
# self.metadata['video.frames_per_second'] = 60
def set_q_values(self, q_values):
self.q_values = q_values
def render(self, mode="human"):
if mode=="rgb_array":
env_rgb_array = super().render(mode)
fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
df = pd.DataFrame(np.array(self.q_values).T)
sns.barplot(x=df.index, y=0, data=df, ax=ax)
ax.set(xlabel='actions', ylabel='q-values')
fig.canvas.draw()
X = np.array(fig.canvas.renderer.buffer_rgba())
Image.fromarray(X)
# Image.fromarray(X)
rgb_image = np.array(Image.fromarray(X).convert('RGB'))
plt.close(fig)
q_value_rgb_array = rgb_image
return np.append(env_rgb_array, q_value_rgb_array, axis=1)
else:
super().render(mode)
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
if args.capture_video:
env = QValueVisualizationWrapper(env)
env = Monitor(env, f'videos/{experiment_name}')
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
# respect the default timelimit
assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
# ALGO LOGIC: initialize agent here:
# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16
# apparently matters
class Linear0(nn.Linear):
def reset_parameters(self):
nn.init.constant_(self.weight, 0.0)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
class QNetwork(nn.Module):
def __init__(self, frames=4):
super(QNetwork, self).__init__()
self.network = nn.Sequential(
Scale(1/255),
nn.Conv2d(frames, 32, 8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(3136, 512),
nn.ReLU(),
Linear0(512, env.action_space.n)
)
def forward(self, x):
x = torch.Tensor(x).to(device)
return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
rb = ReplayBuffer(args.buffer_size)
q_network = QNetwork().to(device)
target_network = QNetwork().to(device)
target_network.load_state_dict(q_network.state_dict())
optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
loss_fn = nn.MSELoss()
print(device.__repr__())
print(q_network)
# TRY NOT TO MODIFY: start the game
obs = env.reset()
episode_reward = 0
for global_step in range(args.total_timesteps):
# ALGO LOGIC: put action logic here
epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
obs = np.array(obs)
logits = q_network.forward(obs.reshape((1,)+obs.shape))
if args.capture_video:
env.set_q_values(logits.tolist())
if random.random() < epsilon:
action = env.action_space.sample()
else:
action = torch.argmax(logits, dim=1).tolist()[0]
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, reward, done, info = env.step(action)
episode_reward += reward
# TRY NOT TO MODIFY: record rewards for plotting purposes
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
# ALGO LOGIC: training.
rb.put((obs, action, reward, next_obs, done))
if global_step > args.learning_starts and global_step % args.train_frequency == 0:
s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
with torch.no_grad():
# target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
current_value = q_network.forward(s_next_obses)
target_value = target_network.forward(s_next_obses)
target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
loss = loss_fn(td_target, old_val)
writer.add_scalar("losses/td_loss", loss, global_step)
# optimize the midel
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
optimizer.step()
# update the target network
if global_step % args.target_network_frequency == 0:
target_network.load_state_dict(q_network.state_dict())
# TRY NOT TO MODIFY: CRUCIAL step easy to overlook
obs = next_obs
if done:
# important to note that because `EpisodicLifeEnv` wrapper is applied,
# the real episode reward is actually the sum of episode reward of 5 lives
# which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics
obs, episode_reward = env.reset(), 0
env.close()
writer.close()
|
__init__
|
Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    """On reset, take a random number of no-op steps before handing control
    back, so episodes begin from varied initial states.

    The no-op is assumed to be action 0 (asserted against the env's action
    meanings).
    """
    def __init__(self, env, noop_max=30):
        super().__init__(env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """Reset, then perform between 1 and `noop_max` no-op actions."""
        self.env.reset(**kwargs)
        num_noops = (self.override_num_noops
                     if self.override_num_noops is not None
                     else self.unwrapped.np_random.randint(1, self.noop_max + 1))  # pylint: disable=E1101
        assert num_noops > 0
        observation = None
        for _ in range(num_noops):
            observation, _, terminated, _ = self.env.step(self.noop_action)
            if terminated:
                # Episode ended during the no-ops: start over from a reset.
                observation = self.env.reset(**kwargs)
        return observation
    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    """Press FIRE after every reset, for games that stay frozen until the
    player fires (e.g. Breakout)."""
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            # BUGFIX: the original discarded the observation returned by this
            # recovery reset and returned a stale pre-reset frame; capture it
            # so the returned obs matches the env's actual state.
            obs = self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    """Signal `done` on every life lost, but only truly reset the emulator
    on real game over (all lives spent)."""
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0              # life count observed on the previous step
        self.was_real_done = True   # whether the underlying env truly finished
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # Remember whether the *underlying* episode really ended; reset()
        # uses this to decide between a real reset and a no-op step.
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Frame-skip wrapper: repeat each action `skip` times and return the
    pixel-wise max of the last two raw frames (removes Atari sprite
    flicker)."""
    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        # Two-slot buffer holding the last two raw observations.
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat `action`, accumulate reward, max-pool the final frames."""
        accumulated_reward = 0.0
        done = None
        for frame_idx in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if frame_idx == self._skip - 2:
                self._obs_buffer[0] = obs
            elif frame_idx == self._skip - 1:
                self._obs_buffer[1] = obs
            accumulated_reward += reward
            if done:
                break
        # The observation on the done=True frame doesn't matter; the
        # element-wise max of the buffered frames is returned instead.
        return self._obs_buffer.max(axis=0), accumulated_reward, done, info
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
    """Clip every reward to its sign, as in the DQN Nature paper."""
    def __init__(self, env):
        super().__init__(env)
    def reward(self, reward):
        """Map reward to {-1, 0, +1} according to its sign."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
        observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        # One channel when converting to grayscale, else keep RGB.
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        # Replace either the whole observation space or just the selected
        # dict entry, and validate that the original was a HxWxC uint8 image.
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
    def observation(self, obs):
        # Select the frame to warp (whole obs, or one dict entry).
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            # cvtColor drops the channel axis; restore a trailing 1-channel.
            frame = np.expand_dims(frame, -1)
        # Write the warped frame back in the same structure it came from.
        if self._key is None:
            obs = frame
        else:
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    """Stack the last `k` observations into one (returned as a memory-
    efficient LazyFrames rather than a dense array)."""
    # MASKED: __init__ function (lines 187-198)
    def reset(self):
        # Fill the deque with k copies of the first frame so _get_ob()
        # always sees exactly k entries.
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        # Wrap (don't copy) the current window of frames.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    """Rescale uint8 pixel observations into float32 values in [0, 1]."""
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
    def observation(self, observation):
        # careful! Materializes LazyFrames into a dense array, undoing the
        # memory optimization -- use with smaller replay buffers only.
        return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
    """Memory-saving view over a list of frames.

    Keeps the frame list and only concatenates it on first access, so
    frames shared between successive stacked observations are stored once.
    Intended to cut the footprint of large (e.g. 1M-transition) replay
    buffers; convert to a numpy array only right before the model call.
    """
    def __init__(self, frames):
        self._frames = frames
        self._out = None
    def _force(self):
        # Concatenate lazily on first use, cache the result, drop the list.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=0)
            self._frames = None
        return self._out
    def __array__(self, dtype=None):
        result = self._force()
        return result if dtype is None else result.astype(dtype)
    def __len__(self):
        return len(self._force())
    def __getitem__(self, i):
        return self._force()[i]
    def count(self):
        stacked = self._force()
        return stacked.shape[stacked.ndim - 1]
    def frame(self, i):
        return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
    """Apply the raw-Atari wrappers: random no-op starts and 4-frame skip
    with max-pooling. Requires a NoFrameskip env variant.

    `max_episode_steps` is asserted to be None -- custom time limits are
    not supported by this helper.
    """
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    assert max_episode_steps is None
    return env
class ImageToPyTorch(gym.ObservationWrapper):
    """Move the channel axis first: HxWxC observations become CxHxW,
    the layout PyTorch convolutions expect."""
    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        previous_shape = self.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(previous_shape[-1], previous_shape[0], previous_shape[1]),
            dtype=np.uint8,
        )
    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.

    Order matters: life-episodes and FIRE-on-reset act on the raw env,
    frames are then warped to 84x84, optionally scaled to [0, 1], rewards
    optionally sign-clipped, the image made channel-first for PyTorch,
    and finally (optionally) the last 4 frames are stacked.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    env = ImageToPyTorch(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Double DQN Agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='weather to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
    class QValueVisualizationWrapper(gym.Wrapper):
        """Render wrapper that appends a live bar chart of the agent's
        current Q-values next to the game frame (for captured videos)."""
        def __init__(self, env):
            super().__init__(env)
            # Reset once so render() works, and record the frame size so the
            # chart can be drawn at matching pixel dimensions.
            self.env.reset()
            self.image_shape = self.env.render(mode="rgb_array").shape
            # Placeholder until set_q_values() is called each step.
            self.q_values = [[0.,0.,0.,0.]]
            # self.metadata['video.frames_per_second'] = 60
        def set_q_values(self, q_values):
            self.q_values = q_values
        def render(self, mode="human"):
            if mode=="rgb_array":
                env_rgb_array = super().render(mode)
                # figsize is in inches at dpi=100, i.e. matches the frame's
                # pixel dimensions.
                fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
                df = pd.DataFrame(np.array(self.q_values).T)
                sns.barplot(x=df.index, y=0, data=df, ax=ax)
                ax.set(xlabel='actions', ylabel='q-values')
                fig.canvas.draw()
                # Grab the rendered figure as an RGBA pixel buffer.
                X = np.array(fig.canvas.renderer.buffer_rgba())
                Image.fromarray(X)
                # NOTE(review): the call above discards its result (no-op).
                rgb_image = np.array(Image.fromarray(X).convert('RGB'))
                plt.close(fig)
                q_value_rgb_array = rgb_image
                # Place the chart to the right of the game frame.
                return np.append(env_rgb_array, q_value_rgb_array, axis=1)
            else:
                super().render(mode)
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
if args.capture_video:
env = QValueVisualizationWrapper(env)
env = Monitor(env, f'videos/{experiment_name}')
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
# respect the default timelimit
assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
# ALGO LOGIC: initialize agent here:
# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16
# apparently matters
class Linear0(nn.Linear):
def reset_parameters(self):
nn.init.constant_(self.weight, 0.0)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
    class QNetwork(nn.Module):
        """Nature-DQN convolutional Q-network mapping a stack of `frames`
        input planes to one Q-value per action.

        NOTE(review): reads the module-level `env` (for the action count)
        and `device`; the flattened size 3136 (= 64*7*7) assumes 84x84
        inputs -- confirm against the WarpFrame settings.
        """
        def __init__(self, frames=4):
            super(QNetwork, self).__init__()
            self.network = nn.Sequential(
                Scale(1/255),
                nn.Conv2d(frames, 32, 8, stride=4),
                nn.ReLU(),
                nn.Conv2d(32, 64, 4, stride=2),
                nn.ReLU(),
                nn.Conv2d(64, 64, 3, stride=1),
                nn.ReLU(),
                nn.Flatten(),
                nn.Linear(3136, 512),
                nn.ReLU(),
                # Zero-initialized output head (Linear0 above).
                Linear0(512, env.action_space.n)
            )
        def forward(self, x):
            # Accepts numpy input; converts to a float tensor on `device`.
            x = torch.Tensor(x).to(device)
            return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
    # Agent setup: online network, target network (initialized identically),
    # replay buffer, optimizer, and MSE TD loss.
    rb = ReplayBuffer(args.buffer_size)
    q_network = QNetwork().to(device)
    target_network = QNetwork().to(device)
    target_network.load_state_dict(q_network.state_dict())
    optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
    loss_fn = nn.MSELoss()
    print(device.__repr__())
    print(q_network)
    # TRY NOT TO MODIFY: start the game
    obs = env.reset()
    episode_reward = 0
    for global_step in range(args.total_timesteps):
        # ALGO LOGIC: epsilon-greedy action selection with a linearly
        # annealed epsilon.
        epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
        obs = np.array(obs)
        logits = q_network.forward(obs.reshape((1,)+obs.shape))
        if args.capture_video:
            env.set_q_values(logits.tolist())
        if random.random() < epsilon:
            action = env.action_space.sample()
        else:
            action = torch.argmax(logits, dim=1).tolist()[0]
        # TRY NOT TO MODIFY: execute the game and log data.
        next_obs, reward, done, info = env.step(action)
        episode_reward += reward
        # TRY NOT TO MODIFY: record rewards for plotting purposes
        if 'episode' in info.keys():
            print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
            writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
            writer.add_scalar("charts/epsilon", epsilon, global_step)
        # ALGO LOGIC: training.
        rb.put((obs, action, reward, next_obs, done))
        if global_step > args.learning_starts and global_step % args.train_frequency == 0:
            s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
            with torch.no_grad():
                # target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
                # Double DQN: the ONLINE network selects the argmax action,
                # the TARGET network evaluates it (vs. vanilla DQN's single
                # max over the target network, commented out above).
                current_value = q_network.forward(s_next_obses)
                target_value = target_network.forward(s_next_obses)
                target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
            # Bootstrapped TD target; (1 - done) zeroes the tail for terminals.
            td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
            old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
            loss = loss_fn(td_target, old_val)
            writer.add_scalar("losses/td_loss", loss, global_step)
            # optimize the model
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
            optimizer.step()
            # update the target network (hard copy every N steps)
            if global_step % args.target_network_frequency == 0:
                target_network.load_state_dict(q_network.state_dict())
        # TRY NOT TO MODIFY: CRUCIAL step easy to overlook
        obs = next_obs
        if done:
            # important to note that because `EpisodicLifeEnv` wrapper is applied,
            # the real episode reward is actually the sum of episode reward of 5 lives
            # which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics
            obs, episode_reward = env.reset(), 0
    env.close()
    writer.close()
|
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
| 187
| 198
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    """Press FIRE after every reset, for games that stay frozen until the
    player fires (e.g. Breakout)."""
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            # BUGFIX: the original discarded the observation returned by this
            # recovery reset and returned a stale pre-reset frame; capture it
            # so the returned obs matches the env's actual state.
            obs = self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
    """Stack the last `k` observations into one (returned as a memory-
    efficient LazyFrames rather than a dense array)."""
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Frames are stacked along axis 0 (LazyFrames concatenates on
        # axis 0), hence shp[0] * k; with channel-first (C, H, W) obs this
        # yields (C*k, H, W).
        self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
    def reset(self):
        # Fill the deque with k copies of the first frame so _get_ob()
        # always sees exactly k entries.
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        # Wrap (don't copy) the current window of frames.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
Image shape to channels x weight x height
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = ImageToPyTorch(env)
if frame_stack:
env = FrameStack(env, 4)
return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Double DQN Agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='weather to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
class QValueVisualizationWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.env.reset()
self.image_shape = self.env.render(mode="rgb_array").shape
self.q_values = [[0.,0.,0.,0.]]
# self.metadata['video.frames_per_second'] = 60
def set_q_values(self, q_values):
self.q_values = q_values
def render(self, mode="human"):
if mode=="rgb_array":
env_rgb_array = super().render(mode)
fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
df = pd.DataFrame(np.array(self.q_values).T)
sns.barplot(x=df.index, y=0, data=df, ax=ax)
ax.set(xlabel='actions', ylabel='q-values')
fig.canvas.draw()
X = np.array(fig.canvas.renderer.buffer_rgba())
Image.fromarray(X)
# Image.fromarray(X)
rgb_image = np.array(Image.fromarray(X).convert('RGB'))
plt.close(fig)
q_value_rgb_array = rgb_image
return np.append(env_rgb_array, q_value_rgb_array, axis=1)
else:
super().render(mode)
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
if args.capture_video:
env = QValueVisualizationWrapper(env)
env = Monitor(env, f'videos/{experiment_name}')
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
# respect the default timelimit
assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#
class ReplayBuffer():
    """Fixed-capacity FIFO store of (obs, action, reward, next_obs, done) transitions."""
    def __init__(self, buffer_limit):
        # deque silently evicts the oldest transition once buffer_limit is reached
        self.buffer = collections.deque(maxlen=buffer_limit)
    def put(self, transition):
        """Append one (obs, action, reward, next_obs, done_mask) tuple."""
        self.buffer.append(transition)
    def sample(self, n):
        """Draw n transitions uniformly without replacement; return five np arrays."""
        batch = random.sample(self.buffer, n)
        obs, actions, rewards, next_obs, dones = zip(*batch)
        return (np.array(obs), np.array(actions),
                np.array(rewards), np.array(next_obs),
                np.array(dones))
# ALGO LOGIC: initialize agent here:
# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16
# apparently matters
class Linear0(nn.Linear):
    """nn.Linear whose weight (and bias, when present) is initialized to exactly zero."""
    def reset_parameters(self):
        # Invoked by nn.Linear.__init__; replaces the default Kaiming init.
        nn.init.zeros_(self.weight)
        if self.bias is not None:
            nn.init.zeros_(self.bias)
class Scale(nn.Module):
    """Multiply the input tensor by a fixed scalar (e.g. 1/255 for raw pixels)."""
    def __init__(self, scale):
        super().__init__()
        self.scale = scale
    def forward(self, x):
        return self.scale * x
class QNetwork(nn.Module):
    """Nature-DQN convolutional Q-network mapping stacked frames to per-action Q-values.

    NOTE(review): relies on the module-level globals `env` (for the action count)
    and `device` (for tensor placement) defined earlier in this script.
    """
    def __init__(self, frames=4):
        super(QNetwork, self).__init__()
        self.network = nn.Sequential(
            Scale(1/255),  # rescale raw uint8 pixels into [0, 1]
            nn.Conv2d(frames, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),  # 3136 = 64 * 7 * 7 for 84x84 inputs -- TODO confirm input size
            nn.ReLU(),
            Linear0(512, env.action_space.n)  # zero-init head: all initial Q-values are 0
        )
    def forward(self, x):
        # Accepts numpy arrays; converts to float tensor and moves to `device`.
        x = torch.Tensor(x).to(device)
        return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
    """Exploration epsilon at step t: decay linearly from start_e toward end_e
    over `duration` steps, then hold at end_e (assumes start_e >= end_e).
    """
    per_step = (end_e - start_e) / duration
    candidate = per_step * t + start_e
    return candidate if candidate > end_e else end_e
rb = ReplayBuffer(args.buffer_size)
q_network = QNetwork().to(device)
target_network = QNetwork().to(device)
# Start the target network as an exact copy of the online network.
target_network.load_state_dict(q_network.state_dict())
optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
loss_fn = nn.MSELoss()
print(device.__repr__())
print(q_network)
# TRY NOT TO MODIFY: start the game
obs = env.reset()
episode_reward = 0
for global_step in range(args.total_timesteps):
    # ALGO LOGIC: put action logic here
    epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
    obs = np.array(obs)
    # Add a leading batch dimension of 1 before the forward pass.
    logits = q_network.forward(obs.reshape((1,)+obs.shape))
    if args.capture_video:
        env.set_q_values(logits.tolist())
    # Epsilon-greedy action selection.
    if random.random() < epsilon:
        action = env.action_space.sample()
    else:
        action = torch.argmax(logits, dim=1).tolist()[0]
    # TRY NOT TO MODIFY: execute the game and log data.
    next_obs, reward, done, info = env.step(action)
    episode_reward += reward
    # TRY NOT TO MODIFY: record rewards for plotting purposes
    if 'episode' in info.keys():
        print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
        writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
        writer.add_scalar("charts/epsilon", epsilon, global_step)
    # ALGO LOGIC: training.
    rb.put((obs, action, reward, next_obs, done))
    if global_step > args.learning_starts and global_step % args.train_frequency == 0:
        s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
        with torch.no_grad():
            # target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
            # Double-DQN target: the online network selects the argmax action,
            # the target network evaluates it.
            current_value = q_network.forward(s_next_obses)
            target_value = target_network.forward(s_next_obses)
            target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
        # One-step TD target; (1 - done) zeroes the bootstrap on terminal steps.
        td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
        old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
        loss = loss_fn(td_target, old_val)
        writer.add_scalar("losses/td_loss", loss, global_step)
        # optimize the model
        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
        optimizer.step()
    # update the target network
    if global_step % args.target_network_frequency == 0:
        target_network.load_state_dict(q_network.state_dict())
    # TRY NOT TO MODIFY: CRUCIAL step easy to overlook
    obs = next_obs
    if done:
        # important to note that because `EpisodicLifeEnv` wrapper is applied,
        # the real episode reward is actually the sum of episode reward of 5 lives
        # which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics
        obs, episode_reward = env.reset(), 0
env.close()
writer.close()
|
apply
|
通过apply调用层会自动重用同名层
inputs: 上一层的输出;
layer: 要调用的层类名;
arguments: 传递给layer.call的参数;
kwargs: 传递给层初始化的参数。
|
#! -*- coding: utf-8 -*-
# 主要模型
import numpy as np
from bert4keras.layers import *
from bert4keras.snippets import insert_arguments
from bert4keras.snippets import delete_arguments
from bert4keras.snippets import is_string
from keras.models import Model
import json
class Transformer(object):
"""模型基类
"""
    def __init__(
        self,
        vocab_size,  # vocabulary size
        hidden_size,  # hidden (encoding) dimension
        num_hidden_layers,  # total number of Transformer layers
        num_attention_heads,  # number of attention heads
        intermediate_size,  # hidden dimension of the FeedForward block
        hidden_act,  # activation of the FeedForward hidden layer
        dropout_rate=None,  # dropout rate
        embedding_size=None,  # optional explicit embedding size
        attention_head_size=None,  # head_size of V in attention
        attention_key_size=None,  # head_size of Q and K in attention
        sequence_length=None,  # optional fixed sequence length
        keep_tokens=None,  # list of token IDs to keep
        compound_tokens=None,  # extra compound tokens extending the embedding
        residual_attention_scores=False,  # add residual to attention score matrices
        layers=None,  # externally supplied Keras layers (for sharing)
        prefix=None,  # prefix prepended to every layer name
        name=None,  # model name
        **kwargs
    ):
        """Store the architecture hyperparameters; no Keras graph is built here."""
        if keep_tokens is not None:
            # Restricting the vocabulary shrinks the effective vocab size.
            vocab_size = len(keep_tokens)
        if compound_tokens is not None:
            vocab_size += len(compound_tokens)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = attention_head_size or hidden_size // num_attention_heads
        self.attention_key_size = attention_key_size or self.attention_head_size
        self.intermediate_size = intermediate_size
        self.dropout_rate = dropout_rate or 0
        self.hidden_act = hidden_act
        self.embedding_size = embedding_size or hidden_size
        self.sequence_length = sequence_length
        self.keep_tokens = keep_tokens
        self.compound_tokens = compound_tokens
        self.attention_bias = None
        self.position_bias = None
        self.attention_scores = None
        self.residual_attention_scores = residual_attention_scores
        self.layers = {} if layers is None else layers
        self.prefix = prefix or ''
        self.name = name
        self.built = False
    def build(
        self,
        attention_caches=None,
        layer_norm_cond=None,
        layer_norm_cond_hidden_size=None,
        layer_norm_cond_hidden_act=None,
        additional_input_layers=None,
        **kwargs
    ):
        """Build the Keras model (idempotent: a second call is a no-op).

        attention_caches: dict of cached K/V sequences for attention layers,
            formatted as {attention layer name: [K cache, V cache]};
        layer_norm_cond_* args: used for Conditional Layer Normalization,
            i.e. a BERT conditioned on a fixed-length vector.
        """
        if self.built:
            return None
        # Input
        inputs = self.get_inputs()
        self.set_inputs(inputs, additional_input_layers)
        # Other
        self.attention_caches = attention_caches or {}
        self.layer_norm_conds = [
            layer_norm_cond,
            layer_norm_cond_hidden_size,
            layer_norm_cond_hidden_act or 'linear',
        ]
        # Call
        outputs = self.call(inputs)
        self.set_outputs(outputs)
        # Model
        self.model = Model(self.inputs, self.outputs, name=self.name)
        self.built = True
def call(self, inputs):
"""定义模型的执行流程
"""
# Embedding
outputs = self.apply_embeddings(inputs)
# Main
for i in range(self.num_hidden_layers):
outputs = self.apply_main_layers(outputs, i)
# Final
outputs = self.apply_final_layers(outputs)
return outputs
def prefixed(self, name):
"""给名字加前缀
"""
if name is not None:
return self.prefix + name
# MASKED: apply function (lines 115-162)
    def get_inputs(self):
        """Define the model's Input layer(s); implemented by subclasses."""
        raise NotImplementedError
    def apply_embeddings(self, inputs):
        """Map the raw inputs to embeddings; implemented by subclasses."""
        raise NotImplementedError
    def apply_main_layers(self, inputs, index):
        """Apply the index-th main Transformer block; implemented by subclasses."""
        raise NotImplementedError
    def apply_final_layers(self, inputs):
        """Apply the task head(s) on top of the encoder; implemented by subclasses."""
        raise NotImplementedError
    def compute_attention_bias(self, inputs=None):
        """Attention bias shared by every layer (None unless a mask mixin sets it)."""
        return self.attention_bias
    def compute_position_bias(self, inputs=None):
        """Per-layer position bias (generally used by relative position encodings)."""
        return self.position_bias
def set_inputs(self, inputs, additional_input_layers=None):
"""设置input和inputs属性
"""
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
inputs = [inputs]
inputs = inputs[:]
if additional_input_layers is not None:
if not isinstance(additional_input_layers, list):
additional_input_layers = [additional_input_layers]
inputs.extend(additional_input_layers)
self.inputs = inputs
if len(inputs) > 1:
self.input = inputs
else:
self.input = inputs[0]
def set_outputs(self, outputs):
"""设置output和oututs属性
"""
if not isinstance(outputs, list):
outputs = [outputs]
outputs = outputs[:]
self.outputs = outputs
if len(outputs) > 1:
self.output = outputs
else:
self.output = outputs[0]
    @property
    def initializer(self):
        """Default weight initializer: truncated normal, stddev 0.02 (as in BERT)."""
        return keras.initializers.TruncatedNormal(stddev=0.02)
def simplify(self, inputs):
"""将list中的None过滤掉
"""
inputs = [i for i in inputs if i is not None]
if len(inputs) == 1:
inputs = inputs[0]
return inputs
def load_embeddings(self, embeddings):
"""处理Embedding层权重
"""
if self.keep_tokens is not None:
embeddings = embeddings[self.keep_tokens]
if self.compound_tokens is not None:
ext_embeddings = []
for item in self.compound_tokens:
if isinstance(item, list):
item = (item, [1] * len(item))
ext_embeddings.append(
np.average(embeddings[item[0]], 0, item[1])
)
embeddings = np.concatenate([embeddings, ext_embeddings], 0)
return embeddings
    def load_variable(self, checkpoint, name):
        """Load a single variable, from either a dict or a TF checkpoint path."""
        if isinstance(checkpoint, dict):
            return checkpoint[name]
        else:
            return tf.train.load_variable(checkpoint, name)
    def create_variable(self, name, value, dtype=None):
        """Create one backend variable (initializer-valued); returns (variable, value)."""
        dtype = dtype or K.floatx()
        return K.variable(
            self.initializer(value.shape, dtype), dtype, name=name
        ), value
    def variable_mapping(self):
        """Mapping from Keras layer names to checkpoint variable names (subclass hook)."""
        return {}
    def load_weights_from_checkpoint(self, checkpoint, mapping=None):
        """Load weights from a checkpoint according to `mapping`
        (only layers actually present in this model are loaded).
        """
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        weight_value_pairs = []
        for layer, variables in mapping.items():
            layer = self.layers[layer]
            weights = layer.trainable_weights
            values = [self.load_variable(checkpoint, v) for v in variables]
            if isinstance(layer, MultiHeadAttention):
                """If key_size != head_size, project the Q/K checkpoint weights
                into the right shape with a random orthogonal matrix.
                """
                count = 2
                if layer.use_bias:
                    count += 2
                heads = self.num_attention_heads
                head_size = self.attention_head_size
                key_size = self.attention_key_size
                # QR of a Gaussian matrix yields an orthogonal projection.
                W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T
                if layer.attention_scale:
                    W = W * key_size**0.25 / head_size**0.25
                for i in range(count):
                    w, v = weights[i], values[i]
                    w_shape, v_shape = K.int_shape(w), v.shape
                    if w_shape[-1] != v_shape[-1]:
                        pre_shape = w_shape[:-1]
                        v = v.reshape(pre_shape + (heads, head_size))
                        v = np.dot(v, W)
                        v = v.reshape(pre_shape + (heads * key_size,))
                        values[i] = v
            weight_value_pairs.extend(zip(weights, values))
        K.batch_set_value(weight_value_pairs)
    def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None):
        """Save the model weights in TF checkpoint format according to `mapping`."""
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        # Build a throwaway graph so saving does not pollute the live session.
        with tf.Graph().as_default():
            all_variables, all_values = [], []
            for layer, variables in mapping.items():
                layer = self.layers[layer]
                values = K.batch_get_value(layer.trainable_weights)
                for name, value in zip(variables, values):
                    variable, value = self.create_variable(name, value, dtype)
                    all_variables.append(variable)
                    all_values.append(value)
            with tf.Session() as sess:
                K.batch_set_value(zip(all_variables, all_values))
                saver = tf.train.Saver()
                saver.save(sess, filename)
class LM_Mask(object):
    """Lower-triangular attention mask (for causal language models)."""
    def compute_attention_bias(self, inputs=None):
        """Build the mask by comparing position indices of the sequence."""
        if self.attention_bias is None:
            def lm_mask(s):
                # positions j <= i may attend; the rest get a large negative bias
                seq_len = K.shape(s)[1]
                idxs = K.arange(0, seq_len)
                mask = idxs[None, :] <= idxs[:, None]
                mask = K.cast(mask, K.floatx())
                return -(1 - mask[None, None]) * 1e12
            self.attention_bias = self.apply(
                inputs=self.inputs[0],
                layer=Lambda,
                function=lm_mask,
                name='Attention-LM-Mask'
            )
        return self.attention_bias
class UniLM_Mask(object):
    """UniLM attention mask (for Seq2Seq models), where segment_ids
    mark the source/target partition.
    UniLM: https://arxiv.org/abs/1905.03197
    """
    def compute_attention_bias(self, inputs=None):
        """Build the mask by comparing cumulative segment-id sums."""
        if self.attention_bias is None:
            def unilm_mask(s):
                # source tokens attend bidirectionally; target tokens attend
                # to the source plus earlier target positions (causal)
                idxs = K.cumsum(s, axis=1)
                mask = idxs[:, None, :] <= idxs[:, :, None]
                mask = K.cast(mask, K.floatx())
                return -(1 - mask[:, None]) * 1e12
            self.attention_bias = self.apply(
                inputs=self.inputs[1],
                layer=Lambda,
                function=unilm_mask,
                name='Attention-UniLM-Mask'
            )
        return self.attention_bias
class BERT(Transformer):
"""构建BERT模型
"""
    def __init__(
        self,
        max_position,  # maximum sequence length
        segment_vocab_size=2,  # number of segment types
        with_pool=False,  # include the pooler head
        with_nsp=False,  # include the NSP head
        with_mlm=False,  # include the MLM head
        hierarchical_position=None,  # hierarchically decomposed position embeddings
        custom_position_ids=False,  # caller supplies explicit position ids
        shared_segment_embeddings=False,  # if True, segments share the token embedding
        **kwargs  # remaining base-class arguments
    ):
        super(BERT, self).__init__(**kwargs)
        self.max_position = max_position
        self.segment_vocab_size = segment_vocab_size
        self.with_pool = with_pool
        self.with_nsp = with_nsp
        self.with_mlm = with_mlm
        self.hierarchical_position = hierarchical_position
        self.custom_position_ids = custom_position_ids
        self.shared_segment_embeddings = shared_segment_embeddings
        if self.with_nsp and not self.with_pool:
            # NSP is computed from the pooled [CLS] vector, so it implies pooling.
            self.with_pool = True
    def get_inputs(self):
        """BERT's inputs are token_ids and segment_ids
        (position ids may also be passed in for special needs).
        """
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        inputs = [x_in]
        if self.segment_vocab_size > 0:
            s_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Segment'
            )
            inputs.append(s_in)
        if self.custom_position_ids:
            p_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Position'
            )
            inputs.append(p_in)
        return inputs
    def apply_embeddings(self, inputs):
        """BERT's embedding is the sum of token, position and segment embeddings."""
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        # z is the optional Conditional-LayerNorm condition vector.
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            # Project embeddings up/down to the encoder width (ALBERT-style).
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """BERT's trunk is a self-attention block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Decide the output heads according to the with_* flags."""
        x = inputs
        z = self.layer_norm_conds[0]
        outputs = [x]
        if self.with_pool:
            # Pooler: extract the [CLS] vector
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Lambda,
                function=lambda x: x[:, 0],
                name='Pooler'
            )
            # with_pool may carry a custom activation name instead of True.
            pool_activation = 'tanh' if self.with_pool is True else self.with_pool
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=pool_activation,
                kernel_initializer=self.initializer,
                name='Pooler-Dense'
            )
            if self.with_nsp:
                # Next Sentence Prediction head
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=2,
                    activation='softmax',
                    kernel_initializer=self.initializer,
                    name='NSP-Proba'
                )
            outputs.append(x)
        if self.with_mlm:
            # Masked Language Model head
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.embedding_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='MLM-Dense'
            )
            x = self.apply(
                inputs=self.simplify([x, z]),
                layer=LayerNormalization,
                conditional=(z is not None),
                hidden_units=self.layer_norm_conds[1],
                hidden_activation=self.layer_norm_conds[2],
                hidden_initializer=self.initializer,
                name='MLM-Norm'
            )
            # Tie the output projection to the token embedding matrix.
            x = self.apply(
                inputs=x,
                layer=Embedding,
                arguments={'mode': 'dense'},
                name='Embedding-Token'
            )
            x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias')
            mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm
            x = self.apply(
                inputs=x,
                layer=Activation,
                activation=mlm_activation,
                name='MLM-Activation'
            )
            outputs.append(x)
        if len(outputs) == 1:
            outputs = outputs[0]
        elif len(outputs) == 2:
            outputs = outputs[1]
        else:
            outputs = outputs[1:]
        return outputs
    def load_variable(self, checkpoint, name):
        """Load a single variable, adapting BERT-specific weights on the way."""
        variable = super(BERT, self).load_variable(checkpoint, name)
        if name in [
            'bert/embeddings/word_embeddings',
            'cls/predictions/output_bias',
        ]:
            # Vocabulary-sized weights follow keep_tokens/compound_tokens.
            return self.load_embeddings(variable)
        elif name == 'cls/seq_relationship/output_weights':
            # The checkpoint stores the NSP weights transposed.
            return variable.T
        else:
            return variable
    def create_variable(self, name, value, dtype=None):
        """Create a variable in TensorFlow (NSP weights are saved transposed)."""
        if name == 'cls/seq_relationship/output_weights':
            value = value.T
        return super(BERT, self).create_variable(name, value, dtype)
    def variable_mapping(self):
        """Mapping to the official BERT checkpoint variable layout."""
        mapping = {
            'Embedding-Token': ['bert/embeddings/word_embeddings'],
            'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],
            'Embedding-Position': ['bert/embeddings/position_embeddings'],
            'Embedding-Norm': [
                'bert/embeddings/LayerNorm/beta',
                'bert/embeddings/LayerNorm/gamma',
            ],
            'Embedding-Mapping': [
                'bert/encoder/embedding_hidden_mapping_in/kernel',
                'bert/encoder/embedding_hidden_mapping_in/bias',
            ],
            'Pooler-Dense': [
                'bert/pooler/dense/kernel',
                'bert/pooler/dense/bias',
            ],
            'NSP-Proba': [
                'cls/seq_relationship/output_weights',
                'cls/seq_relationship/output_bias',
            ],
            'MLM-Dense': [
                'cls/predictions/transform/dense/kernel',
                'cls/predictions/transform/dense/bias',
            ],
            'MLM-Norm': [
                'cls/predictions/transform/LayerNorm/beta',
                'cls/predictions/transform/LayerNorm/gamma',
            ],
            'MLM-Bias': ['cls/predictions/output_bias'],
        }
        # Per-layer encoder weights.
        for i in range(self.num_hidden_layers):
            prefix = 'bert/encoder/layer_%d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention/self/query/kernel',
                    prefix + 'attention/self/query/bias',
                    prefix + 'attention/self/key/kernel',
                    prefix + 'attention/self/key/bias',
                    prefix + 'attention/self/value/kernel',
                    prefix + 'attention/self/value/bias',
                    prefix + 'attention/output/dense/kernel',
                    prefix + 'attention/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'attention/output/LayerNorm/beta',
                    prefix + 'attention/output/LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/dense/kernel',
                    prefix + 'intermediate/dense/bias',
                    prefix + 'output/dense/kernel',
                    prefix + 'output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'output/LayerNorm/beta',
                    prefix + 'output/LayerNorm/gamma',
                ],
            })
        return mapping
class ALBERT(BERT):
"""构建ALBERT模型
"""
    def apply_main_layers(self, inputs, index):
        """ALBERT's trunk is a self-attention block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        The layer names carry no index, so apply() reuses one shared block
        across all layers (ALBERT's cross-layer parameter sharing).
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-MultiHeadSelfAttention'
        feed_forward_name = 'Transformer-FeedForward'
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def variable_mapping(self):
        """Mapping to the official ALBERT checkpoint layout (one shared group)."""
        mapping = super(ALBERT, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        mapping.update({
            'Transformer-MultiHeadSelfAttention': [
                prefix + 'attention_1/self/query/kernel',
                prefix + 'attention_1/self/query/bias',
                prefix + 'attention_1/self/key/kernel',
                prefix + 'attention_1/self/key/bias',
                prefix + 'attention_1/self/value/kernel',
                prefix + 'attention_1/self/value/bias',
                prefix + 'attention_1/output/dense/kernel',
                prefix + 'attention_1/output/dense/bias',
            ],
            'Transformer-MultiHeadSelfAttention-Norm': [
                prefix + 'LayerNorm/beta',
                prefix + 'LayerNorm/gamma',
            ],
            'Transformer-FeedForward': [
                prefix + 'ffn_1/intermediate/dense/kernel',
                prefix + 'ffn_1/intermediate/dense/bias',
                prefix + 'ffn_1/intermediate/output/dense/kernel',
                prefix + 'ffn_1/intermediate/output/dense/bias',
            ],
            'Transformer-FeedForward-Norm': [
                prefix + 'LayerNorm_1/beta',
                prefix + 'LayerNorm_1/gamma',
            ],
        })
        return mapping
class ALBERT_Unshared(BERT):
"""解开ALBERT共享约束,当成BERT用
"""
    def variable_mapping(self):
        """Mapping to the official ALBERT checkpoint layout: every unshared
        layer loads from the same single shared group.
        """
        mapping = super(ALBERT_Unshared, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        for i in range(self.num_hidden_layers):
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention_1/self/query/kernel',
                    prefix + 'attention_1/self/query/bias',
                    prefix + 'attention_1/self/key/kernel',
                    prefix + 'attention_1/self/key/bias',
                    prefix + 'attention_1/self/value/kernel',
                    prefix + 'attention_1/self/value/bias',
                    prefix + 'attention_1/output/dense/kernel',
                    prefix + 'attention_1/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'LayerNorm/beta',
                    prefix + 'LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'ffn_1/intermediate/dense/kernel',
                    prefix + 'ffn_1/intermediate/dense/bias',
                    prefix + 'ffn_1/intermediate/output/dense/kernel',
                    prefix + 'ffn_1/intermediate/output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'LayerNorm_1/beta',
                    prefix + 'LayerNorm_1/gamma',
                ],
            })
        return mapping
class NEZHA(BERT):
"""华为推出的NAZHA模型
链接:https://arxiv.org/abs/1909.00204
"""
def apply_embeddings(self, inputs):
"""NEZHA的embedding是token、segment两者embedding之和
"""
inputs = inputs[:]
x = inputs.pop(0)
if self.segment_vocab_size > 0:
s = inputs.pop(0)
z = self.layer_norm_conds[0]
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
if self.segment_vocab_size > 0:
if self.shared_segment_embeddings:
name = 'Embedding-Token'
else:
name = 'Embedding-Segment'
s = self.apply(
inputs=s,
layer=Embedding,
input_dim=2,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
name=name
)
x = self.apply(
inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Embedding-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
    def apply_main_layers(self, inputs, index):
        """NEZHA's trunk is a self-attention block with relative position bias.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias(x)
        # Self Attention
        xi, x = x, [x, x, x, position_bias]
        arguments = {'a_bias': None, 'p_bias': 'typical_relative'}
        if attention_mask is not None:
            arguments['a_bias'] = True
            # The attention mask slots in before the position bias.
            x.insert(3, attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def compute_position_bias(self, inputs=None):
        """Classic relative position encoding (sinusoidal, non-trainable).

        input_dim = 2 * 64 + 1: relative distances are clipped to [-64, 64].
        """
        if self.position_bias is None:
            x = inputs
            self.position_bias = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbedding,
                input_dim=2 * 64 + 1,
                output_dim=self.attention_head_size,
                embeddings_initializer='Sinusoidal',
                name='Embedding-Relative-Position',
                trainable=False
            )
        return self.position_bias
class ELECTRA(BERT):
"""Google推出的ELECTRA模型
链接:https://arxiv.org/abs/2003.10555
"""
    @insert_arguments(with_discriminator=False)
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(
        self,
        max_position,  # maximum sequence length
        **kwargs  # remaining base-class arguments
    ):
        """ELECTRA drops BERT's pool/MLM heads and adds `with_discriminator`."""
        super(ELECTRA, self).__init__(max_position, **kwargs)
    def apply_final_layers(self, inputs):
        """Optionally attach the replaced-token-detection discriminator head."""
        x = inputs
        if self.with_discriminator:
            # with_discriminator may carry a custom activation name instead of True.
            if self.with_discriminator is True:
                final_activation = 'sigmoid'
            else:
                final_activation = self.with_discriminator
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='Discriminator-Dense'
            )
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=1,
                activation=final_activation,
                kernel_initializer=self.initializer,
                name='Discriminator-Prediction'
            )
        return x
    def load_variable(self, checkpoint, name):
        """Load a single variable, adapting the word embeddings to keep_tokens."""
        variable = super(ELECTRA, self).load_variable(checkpoint, name)
        if name == 'electra/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """Mapping to the official ELECTRA checkpoint layout."""
        mapping = super(ELECTRA, self).variable_mapping()
        mapping['Embedding-Mapping'] = [
            'electra/embeddings_project/kernel',
            'electra/embeddings_project/bias',
        ]
        # ELECTRA reuses BERT's layout under the 'electra/' scope.
        mapping = {
            k: [i.replace('bert/', 'electra/') for i in v]
            for k, v in mapping.items()
        }
        mapping['Discriminator-Dense'] = [
            'discriminator_predictions/dense/kernel',
            'discriminator_predictions/dense/bias',
        ]
        mapping['Discriminator-Prediction'] = [
            'discriminator_predictions/dense_1/kernel',
            'discriminator_predictions/dense_1/bias',
        ]
        return mapping
class GPT(LM_Mask, BERT):
"""构建GPT模型
链接:https://github.com/openai/finetune-transformer-lm
"""
    @insert_arguments(final_activation='softmax')
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(self, **kwargs):
        """GPT reuses BERT's constructor; pool/MLM heads are removed and a
        `final_activation` argument (default 'softmax') is added for the LM head."""
        super(GPT, self).__init__(**kwargs)
    def apply_embeddings(self, inputs):
        """GPT's embedding is the sum of token, position and segment embeddings.
        The main difference from BERT is that the sum is NOT followed by a
        LayerNormalization layer.
        """
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
def apply_final_layers(self, inputs):
"""剩余部分
"""
x = inputs
# Language Model部分
x = self.apply(
inputs=x,
layer=Embedding,
arguments={'mode': 'dense'},
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Activation,
activation=self.final_activation,
name='LM-Activation'
)
return x
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(GPT, self).load_variable(checkpoint, name)
if name == 'gpt/embeddings/word_embeddings':
return self.load_embeddings(variable)
else:
return variable
def variable_mapping(self):
"""映射到TF版GPT权重格式
"""
mapping = super(GPT, self).variable_mapping()
mapping = {
k: [
i.replace('bert/', 'gpt/').replace('encoder', 'transformer')
for i in v
]
for k, v in mapping.items()
}
return mapping
class GPT2(GPT):
    """GPT2 model.

    Reference: https://github.com/openai/gpt-2
    """
    def get_inputs(self):
        """GPT2's only input is token_ids."""
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in

    def apply_embeddings(self, inputs):
        """GPT2 embedding is the sum of token and position embeddings."""
        x = inputs

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )

        return x

    def apply_main_layers(self, inputs, index):
        """GPT2 body: pre-norm self-attention blocks.

        Order: LN --> Att --> Add --> LN --> FFN --> Add
        """
        x = inputs
        z = self.layer_norm_conds[0]

        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)

        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask],
            layer=MultiHeadAttention,
            arguments={'a_bias': True},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )

        return x

    def apply_final_layers(self, inputs):
        """Final LN + dropout, then the inherited GPT LM head."""
        x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Output-Dropout'
        )
        x = super(GPT2, self).apply_final_layers(x)

        return x

    def variable_mapping(self):
        """Map layer names to the TF-version GPT2 weight names."""
        mapping = super(GPT2, self).variable_mapping()
        mapping = {
            k: [i.replace('output/LayerNorm', 'input/LayerNorm') for i in v]
            for k, v in mapping.items()
        }
        mapping['Output-Norm'] = [
            'gpt/output/LayerNorm/beta',
            'gpt/output/LayerNorm/gamma',
        ]

        return mapping
class GPT2_ML(GPT):
    """GPT2_ML model.

    Reference: https://github.com/imcaspar/gpt2-ml
    Note: although GPT2_ML calls itself GPT2, its structure is actually
    closer to GPT; it presumably claims the GPT2 name because its released
    checkpoint reaches GPT2's 1.5 billion parameters.
    """
    def get_inputs(self):
        """GPT2_ML's only input is token_ids."""
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in

    def apply_embeddings(self, inputs):
        """GPT2_ML embedding is the sum of token and position embeddings,
        followed by a LayerNormalization."""
        x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )

        return x

    def apply_main_layers(self, inputs, index):
        """GPT2_ML body: self-attention blocks.

        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]

        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)

        # Self Attention
        xi, x, arguments = x, [x, x, x, attention_mask], {'a_bias': True}

        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-0' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-1' % feed_forward_name
        )

        return x

    def load_variable(self, checkpoint, name):
        """Load a single checkpoint variable (remaps the word embedding)."""
        variable = super(GPT2_ML, self).load_variable(checkpoint, name)
        if name == 'newslm/embeddings/word_embed':
            return self.load_embeddings(variable)
        else:
            return variable

    def variable_mapping(self):
        """Map layer names to the official GPT2_ML weight names."""
        mapping = {
            'Embedding-Token': ['newslm/embeddings/word_embed'],
            'Embedding-Position': ['newslm/embeddings/pos_embed'],
            'Embedding-Norm': [
                'newslm/embeddings/LayerNorm_embed_norm/beta',
                'newslm/embeddings/LayerNorm_embed_norm/gamma',
            ],
        }

        for i in range(self.num_hidden_layers):
            prefix = 'newslm/layer%02d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'query_layer/kernel',
                    prefix + 'query_layer/bias',
                    prefix + 'key_layer/kernel',
                    prefix + 'key_layer/bias',
                    prefix + 'value_layer/kernel',
                    prefix + 'value_layer/bias',
                    prefix + 'context_projection_layer/kernel',
                    prefix + 'context_projection_layer/bias',
                ],
                'Transformer-%d-FeedForward-Norm-0' % i: [
                    prefix + 'LayerNorm_mlp_ln0/beta',
                    prefix + 'LayerNorm_mlp_ln0/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/kernel',
                    prefix + 'intermediate/bias',
                    prefix + 'output/kernel',
                    prefix + 'output/bias',
                ],
                'Transformer-%d-FeedForward-Norm-1' % i: [
                    prefix + 'LayerNorm_mlp_ln1/beta',
                    prefix + 'LayerNorm_mlp_ln1/gamma',
                ],
            })

        return mapping
class T5_Base(Transformer):
    """Google's T5 model (base class).

    Note that T5 has two versions: the one released first is called
    t5.1.0, and a later upgraded release is called t5.1.1 — the two differ
    slightly in structure.  The multilingual T5 released afterwards also
    uses the t5.1.1 structure.
    t5.1.0: https://github.com/google-research/text-to-text-transfer-transformer
    t5.1.1: https://github.com/google-research/text-to-text-transfer-transformer/blob/master/released_checkpoints.md#t511
    multilingual-t5: https://github.com/google-research/multilingual-t5
    """
    @insert_arguments(version='t5.1.0')
    def __init__(self, **kwargs):
        super(T5_Base, self).__init__(**kwargs)

    def load_variable(self, checkpoint, name):
        """Load a single checkpoint variable, applying T5-specific transforms
        (embedding remap, tied logits kernel, transposed relative bias)."""
        variable = super(T5_Base, self).load_variable(checkpoint, name)
        if name == 'shared/embedding':
            return self.load_embeddings(variable)
        elif name == 'decoder/logits/kernel':
            return self.load_embeddings(variable.T).T
        elif 'relative_attention_bias' in name:
            return variable.T
        else:
            return variable

    def create_variable(self, name, value, dtype=None):
        """Create a tensorflow variable (transposing relative biases back)."""
        if 'relative_attention_bias' in name:
            value = value.T
        return super(T5_Base, self).create_variable(name, value, dtype)

    def variable_mapping(self):
        """Map layer names to the official T5 weight names."""
        mapping = {
            'Embedding-Token': ['shared/embedding'],
            'Encoder-Embedding-Relative-Position': [
                'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'
            ],
            'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],
            'Decoder-Embedding-Relative-Position': [
                'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',
            ],
            'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],
        }

        for i in range(self.num_hidden_layers):
            # Encoder blocks
            prefix = 'encoder/block_%03d/' % i
            mapping.update({
                'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Encoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_001/DenseReluDense/wi/kernel',
                    prefix + 'layer_001/DenseReluDense/wo/kernel',
                ],
                'Encoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
            })
            # Decoder blocks
            prefix = 'decoder/block_%03d/' % i
            mapping.update({
                'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [
                    prefix + 'layer_001/EncDecAttention/q',
                    prefix + 'layer_001/EncDecAttention/k',
                    prefix + 'layer_001/EncDecAttention/v',
                    prefix + 'layer_001/EncDecAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_002/DenseReluDense/wi/kernel',
                    prefix + 'layer_002/DenseReluDense/wo/kernel',
                ],
                'Decoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_002/layer_norm/scale',
                ],
            })

        if self.version == 't5.1.1':
            # t5.1.1 renames layer_norm -> rms_norm, has an untied LM head,
            # and uses a gated FFN (two wi kernels: wi_0 and wi_1).
            mapping['Encoder-Output-Norm'] = ['encoder/rms_norm/scale']
            mapping['Decoder-Output-Norm'] = ['decoder/rms_norm/scale']
            mapping['Decoder-Output-LM'] = ['decoder/logits/kernel']
            mapping = {
                k: [i.replace('layer_norm', 'rms_norm') for i in v]
                for k, v in mapping.items()
            }
            for i in range(self.num_hidden_layers):
                for layer in [
                    'Encoder-Transformer-%d-FeedForward' % i,
                    'Decoder-Transformer-%d-FeedForward' % i
                ]:
                    mapping[layer] = [
                        mapping[layer][0][:-7] + '_0' + mapping[layer][0][-7:],
                        mapping[layer][0][:-7] + '_1' + mapping[layer][0][-7:],
                        mapping[layer][1]
                    ]

        return mapping
class T5_Encoder(T5_Base):
    """Google's T5 model (Encoder)."""
    def get_inputs(self):
        """The T5 Encoder's only input is token_ids."""
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Encoder-Input-Token'
        )
        return x_in

    def apply_embeddings(self, inputs):
        """T5 has only a token embedding; the relative position embedding
        is prepared lazily for the attention layers to use."""
        x = inputs

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Encoder-Embedding-Mapping'
            )

        return x

    def apply_main_layers(self, inputs, index):
        """T5 Encoder body: pre-norm self-attention blocks.

        Order: LN --> Att --> Add --> LN --> FFN --> Add
        """
        x = inputs
        z = self.layer_norm_conds[0]

        attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias(x)

        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, position_bias],
            layer=MultiHeadAttention,
            arguments={'p_bias': 't5_relative'},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )

        return x

    def apply_final_layers(self, inputs):
        """Final LN + dropout of the encoder output."""
        x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Encoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Output-Dropout'
        )

        return x

    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding (built once, then cached)."""
        if self.position_bias is None:
            x = inputs
            p = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=True,
                embeddings_initializer=self.initializer,
                name='Encoder-Embedding-Relative-Position'
            )
            self.position_bias = p

        return self.position_bias
class T5_Decoder(LM_Mask, T5_Base):
    """Google's T5 model (Decoder)."""
    def __init__(self, with_lm=True, **kwargs):
        super(T5_Decoder, self).__init__(**kwargs)
        # with_lm: True -> softmax LM head; a string -> use it as the
        # output activation; falsy -> no LM head.
        self.with_lm = with_lm

    def get_inputs(self):
        """The T5 Decoder's inputs are the encoder context and token_ids."""
        c_in = self.apply(
            layer=Input,
            shape=(self.sequence_length, self.hidden_size),
            name='Input-Context'
        )
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Decoder-Input-Token'
        )
        return [c_in, x_in]

    def apply_embeddings(self, inputs):
        """T5 has only a token embedding; the relative position embedding
        is prepared lazily for the attention layers to use."""
        c, x = inputs

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Decoder-Embedding-Mapping'
            )

        return [c, x]

    def apply_main_layers(self, inputs, index):
        """T5 Decoder body: self-attention + cross-attention blocks.

        Order: LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN --> FFN --> Add
        """
        c, x = inputs
        z = self.layer_norm_conds[0]

        self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index
        cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index
        feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias([x, c])

        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % self_attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask, position_bias[0]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': True,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=self_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % self_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name
        )

        # Cross Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % cross_attention_name
        )
        x = self.apply(
            inputs=[x, c, c, position_bias[1]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': None,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=cross_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % cross_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )

        return [c, x]

    def apply_final_layers(self, inputs):
        """Final LN + dropout + scaling, then (optionally) the LM head."""
        c, x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Decoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Output-Dropout'
        )
        # Scale by 1/sqrt(hidden_size) before the (tied) output projection.
        x = self.apply(
            inputs=x,
            layer=Lambda,
            function=lambda x: x / np.sqrt(self.hidden_size),
            mask=lambda i, m: m,
            name='Decoder-Output-Scale'
        )

        if self.with_lm:
            # Token-probability prediction head.
            if self.embedding_size != self.hidden_size:
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.embedding_size,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-Mapping'
                )
            lm_activation = 'softmax' if self.with_lm is True else self.with_lm
            if self.version == 't5.1.0':
                # t5.1.0 ties the LM head to the token embedding.
                x = self.apply(
                    inputs=x,
                    layer=Embedding,
                    arguments={'mode': 'dense'},
                    name='Embedding-Token'
                )
                # NOTE(review): 'Dencoder' is a typo, but the layer name is
                # part of the saved-model interface — renaming would break
                # lookups by name; confirm before changing.
                x = self.apply(
                    inputs=x,
                    layer=Activation,
                    activation=lm_activation,
                    name='Dencoder-Output-LM-Activation'
                )
            else:
                # t5.1.1 uses an independent (untied) output projection.
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.vocab_size,
                    activation=lm_activation,
                    use_bias=False,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-LM'
                )

        return x

    def compute_attention_bias(self, inputs=None):
        """Fix the LM mask's sequence length.

        LM_Mask derives it from self.inputs[0]; for the decoder the token
        input is self.inputs[1], so swap it in temporarily.
        """
        old_inputs = self.inputs[:]
        self.inputs = [old_inputs[1]]
        mask = super(T5_Decoder, self).compute_attention_bias(inputs)
        self.inputs = old_inputs
        return mask

    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding: a (self-attn, cross-attn) pair,
        built once via the shared layer and then cached."""
        if self.position_bias is None:
            x, c = inputs
            p1 = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            p2 = self.apply(
                inputs=[x, c],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            self.position_bias = (p1, p2)

        return self.position_bias
class T5(T5_Base):
    """Google's T5 model (Encoder-Decoder)."""
    def __init__(self, **kwargs):
        super(T5, self).__init__(**kwargs)
        # Share the layer cache so encoder and decoder reuse e.g. the
        # token embedding.
        kwargs['layers'] = self.layers
        e_name, d_name = 'Encoder', 'Decoder'
        if 'name' in kwargs:
            e_name = '%s_%s' % (kwargs['name'], e_name)
            d_name = '%s_%s' % (kwargs['name'], d_name)
            del kwargs['name']  # avoid passing name twice

        self._encoder = T5_Encoder(name=e_name, **kwargs)
        self._decoder = T5_Decoder(name=d_name, **kwargs)

    def build(self, **kwargs):
        """Build the Encoder and the Decoder at the same time, then wire
        them into a single end-to-end Keras model."""
        self._encoder.build(**kwargs)
        self._decoder.build(**kwargs)
        self.encoder = self._encoder.model
        self.decoder = self._decoder.model
        self.inputs = self.encoder.inputs + self.decoder.inputs[1:]
        self.outputs = self.decoder(
            self.encoder.outputs + self.decoder.inputs[1:]
        )
        self.model = Model(self.inputs, self.outputs)
def extend_with_language_model(BaseModel):
    """Derive a *BaseModel* variant with a lower-triangular attention mask
    (for language-model applications)."""
    class LanguageModel(LM_Mask, BaseModel):
        """Derived model with a causal (lower-triangular) attention mask."""
        def __init__(self, *args, **kwargs):
            super(LanguageModel, self).__init__(*args, **kwargs)
            # Force an LM head; keep any explicit truthy with_mlm setting.
            if not self.with_mlm:
                self.with_mlm = True

    return LanguageModel
def extend_with_unified_language_model(BaseModel):
    """Derive a *BaseModel* variant with the UniLM attention mask
    (for seq2seq applications)."""
    class UnifiedLanguageModel(UniLM_Mask, BaseModel):
        """Derived model with the UniLM attention mask.

        UniLM: https://arxiv.org/abs/1905.03197
        """
        def __init__(self, *args, **kwargs):
            super(UnifiedLanguageModel, self).__init__(*args, **kwargs)
            # Force an LM head; keep any explicit truthy with_mlm setting.
            if not self.with_mlm:
                self.with_mlm = True

    return UnifiedLanguageModel
def build_transformer_model(
    config_path=None,
    checkpoint_path=None,
    model='bert',
    application='encoder',
    return_keras_model=True,
    **kwargs
):
    """Build a transformer from a config file, optionally loading weights.

    config_path: path to a BERT-style json config (values overridable
        via **kwargs);
    checkpoint_path: optional TF checkpoint to load weights from;
    model: a model-name string (see `models` below) or a Transformer
        subclass passed directly;
    application: 'encoder', 'lm' or 'unilm';
    return_keras_model: True -> return the underlying keras Model,
        False -> return the Transformer wrapper object.
    """
    configs = {}
    if config_path is not None:
        configs.update(json.load(open(config_path)))
    configs.update(kwargs)
    if 'max_position' not in configs:
        configs['max_position'] = configs.get('max_position_embeddings', 512)
    if 'dropout_rate' not in configs:
        configs['dropout_rate'] = configs.get('hidden_dropout_prob')
    if 'segment_vocab_size' not in configs:
        configs['segment_vocab_size'] = configs.get('type_vocab_size', 2)

    models = {
        'bert': BERT,
        'albert': ALBERT,
        'albert_unshared': ALBERT_Unshared,
        'roberta': BERT,
        'nezha': NEZHA,
        'electra': ELECTRA,
        'gpt': GPT,
        'gpt2': GPT2,
        'gpt2_ml': GPT2_ML,
        't5': T5,
        't5_encoder': T5_Encoder,
        't5_decoder': T5_Decoder,
        't5.1.0': T5,
        't5.1.0_encoder': T5_Encoder,
        't5.1.0_decoder': T5_Decoder,
        't5.1.1': T5,
        't5.1.1_encoder': T5_Encoder,
        't5.1.1_decoder': T5_Decoder,
    }

    if is_string(model):
        model = model.lower()
        MODEL = models[model]
        # BUG FIX: the t5.1.1 version check used to run unconditionally,
        # so passing a model *class* (not a name) crashed with
        # AttributeError on `model.startswith`. Only a string name can
        # carry version information, so check it here.
        if model.startswith('t5.1.1'):
            configs['version'] = 't5.1.1'
    else:
        MODEL = model

    application = application.lower()
    if application in ['lm', 'unilm'] and model in ['electra', 't5']:
        raise ValueError(
            '"%s" model can not be used as "%s" application.\n' %
            (model, application)
        )

    if application == 'lm':
        MODEL = extend_with_language_model(MODEL)
    elif application == 'unilm':
        MODEL = extend_with_unified_language_model(MODEL)

    transformer = MODEL(**configs)
    # Build the Keras model via Transformer.build().
    transformer.build(**configs)

    if checkpoint_path is not None:
        transformer.load_weights_from_checkpoint(checkpoint_path)

    if return_keras_model:
        return transformer.model
    else:
        return transformer
|
def apply(self, inputs=None, layer=None, arguments=None, **kwargs):
    """Call a layer through `apply`, automatically reusing same-named layers.

    inputs: output of the previous layer (None -> just return the layer);
    layer: the layer class to instantiate/call;
    arguments: parameters forwarded to layer.call;
    kwargs: parameters forwarded to the layer constructor.
    """
    # Dropout with rate 0 is a no-op: skip creating the layer entirely.
    if layer is Dropout and self.dropout_rate == 0:
        return inputs

    if layer is MultiHeadAttention and self.residual_attention_scores:
        kwargs['return_attention_scores'] = True

    arguments = arguments or {}
    name = self.prefixed(kwargs.get('name'))
    kwargs['name'] = name
    if name not in self.layers:
        layer = layer(**kwargs)
        name = layer.name
        self.layers[name] = layer

    if inputs is None:
        return self.layers[name]
    else:
        if isinstance(self.layers[name], MultiHeadAttention):
            if name in self.attention_caches:
                # If a K/V cache was supplied, automatically concatenate
                # it in front of the current Key and Value sequences.
                k_cache, v_cache = self.attention_caches[name]
                k_name, v_name = name + '-Cached-Key', name + '-Cached-Value'
                k = Concatenate1D(name=k_name)([k_cache, inputs[1]])
                v = Concatenate1D(name=v_name)([v_cache, inputs[2]])
                inputs = inputs[:1] + [k, v] + inputs[3:]
            if self.residual_attention_scores:
                # Residual attention matrices: add the previous layer's
                # attention matrix to the current one, following the
                # RealFormer design (https://arxiv.org/abs/2012.11747).
                # This implementation is still rough and may lack generality.
                if self.attention_scores is not None:
                    if arguments.get('a_bias'):
                        a_bias = Add(name=name + '-Attention-Bias'
                                     )([inputs[3], self.attention_scores])
                    else:
                        a_bias = self.attention_scores
                    inputs = inputs[:3] + [a_bias] + inputs[4:]
                    arguments['a_bias'] = True
                o, a = self.layers[name](inputs, **arguments)
                self.attention_scores = a
                return o
        return self.layers[name](inputs, **arguments)
| 115
| 162
|
#! -*- coding: utf-8 -*-
# 主要模型
import numpy as np
from bert4keras.layers import *
from bert4keras.snippets import insert_arguments
from bert4keras.snippets import delete_arguments
from bert4keras.snippets import is_string
from keras.models import Model
import json
class Transformer(object):
"""模型基类
"""
def __init__(
    self,
    vocab_size,  # vocabulary size
    hidden_size,  # hidden (encoding) dimension
    num_hidden_layers,  # total number of Transformer layers
    num_attention_heads,  # number of attention heads
    intermediate_size,  # FeedForward hidden dimension
    hidden_act,  # FeedForward hidden activation
    dropout_rate=None,  # dropout rate
    embedding_size=None,  # optionally decouple embedding size from hidden size
    attention_head_size=None,  # head_size of V in attention
    attention_key_size=None,  # head_size of Q and K in attention
    sequence_length=None,  # optionally fix the sequence length
    keep_tokens=None,  # list of token IDs to keep
    compound_tokens=None,  # extended (compound) embedding entries
    residual_attention_scores=False,  # add residuals to attention matrices
    layers=None,  # externally supplied Keras layers (enables sharing)
    prefix=None,  # layer-name prefix
    name=None,  # model name
    **kwargs
):
    """Store the common transformer hyper-parameters and mutable state."""
    if keep_tokens is not None:
        vocab_size = len(keep_tokens)
    if compound_tokens is not None:
        vocab_size += len(compound_tokens)
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.attention_head_size = attention_head_size or hidden_size // num_attention_heads
    self.attention_key_size = attention_key_size or self.attention_head_size
    self.intermediate_size = intermediate_size
    self.dropout_rate = dropout_rate or 0
    self.hidden_act = hidden_act
    self.embedding_size = embedding_size or hidden_size
    self.sequence_length = sequence_length
    self.keep_tokens = keep_tokens
    self.compound_tokens = compound_tokens
    self.attention_bias = None
    self.position_bias = None
    self.attention_scores = None
    self.residual_attention_scores = residual_attention_scores
    self.layers = {} if layers is None else layers
    self.prefix = prefix or ''
    self.name = name
    self.built = False
def build(
    self,
    attention_caches=None,
    layer_norm_cond=None,
    layer_norm_cond_hidden_size=None,
    layer_norm_cond_hidden_act=None,
    additional_input_layers=None,
    **kwargs
):
    """Model building entry point (idempotent).

    attention_caches: dict of cached K/V sequences for attention layers,
        in the format {attention layer name: [K cache, V cache]};
    layer_norm_* parameters: used for Conditional Layer Normalization,
        i.e. a conditional BERT conditioned on a fixed-length vector.
    """
    if self.built:
        return None
    # Input
    inputs = self.get_inputs()
    self.set_inputs(inputs, additional_input_layers)
    # Other
    self.attention_caches = attention_caches or {}
    self.layer_norm_conds = [
        layer_norm_cond,
        layer_norm_cond_hidden_size,
        layer_norm_cond_hidden_act or 'linear',
    ]
    # Call
    outputs = self.call(inputs)
    self.set_outputs(outputs)
    # Model
    self.model = Model(self.inputs, self.outputs, name=self.name)
    self.built = True
def call(self, inputs):
    """Define the model's execution flow:
    embeddings -> N main blocks -> output head."""
    # Embedding
    outputs = self.apply_embeddings(inputs)
    # Main
    for i in range(self.num_hidden_layers):
        outputs = self.apply_main_layers(outputs, i)
    # Final
    outputs = self.apply_final_layers(outputs)
    return outputs
def prefixed(self, name):
"""给名字加前缀
"""
if name is not None:
return self.prefix + name
def apply(self, inputs=None, layer=None, arguments=None, **kwargs):
    """Call a layer through `apply`, automatically reusing same-named layers.

    inputs: output of the previous layer (None -> just return the layer);
    layer: the layer class to instantiate/call;
    arguments: parameters forwarded to layer.call;
    kwargs: parameters forwarded to the layer constructor.
    """
    # Dropout with rate 0 is a no-op: skip creating the layer entirely.
    if layer is Dropout and self.dropout_rate == 0:
        return inputs

    if layer is MultiHeadAttention and self.residual_attention_scores:
        kwargs['return_attention_scores'] = True

    arguments = arguments or {}
    name = self.prefixed(kwargs.get('name'))
    kwargs['name'] = name
    if name not in self.layers:
        layer = layer(**kwargs)
        name = layer.name
        self.layers[name] = layer

    if inputs is None:
        return self.layers[name]
    else:
        if isinstance(self.layers[name], MultiHeadAttention):
            if name in self.attention_caches:
                # If a K/V cache was supplied, automatically concatenate
                # it in front of the current Key and Value sequences.
                k_cache, v_cache = self.attention_caches[name]
                k_name, v_name = name + '-Cached-Key', name + '-Cached-Value'
                k = Concatenate1D(name=k_name)([k_cache, inputs[1]])
                v = Concatenate1D(name=v_name)([v_cache, inputs[2]])
                inputs = inputs[:1] + [k, v] + inputs[3:]
            if self.residual_attention_scores:
                # Residual attention matrices: add the previous layer's
                # attention matrix to the current one, following the
                # RealFormer design (https://arxiv.org/abs/2012.11747).
                # This implementation is still rough and may lack generality.
                if self.attention_scores is not None:
                    if arguments.get('a_bias'):
                        a_bias = Add(name=name + '-Attention-Bias'
                                     )([inputs[3], self.attention_scores])
                    else:
                        a_bias = self.attention_scores
                    inputs = inputs[:3] + [a_bias] + inputs[4:]
                    arguments['a_bias'] = True
                o, a = self.layers[name](inputs, **arguments)
                self.attention_scores = a
                return o
        return self.layers[name](inputs, **arguments)
def get_inputs(self):
    """Build and return the model's Input layer(s); subclass hook."""
    raise NotImplementedError

def apply_embeddings(self, inputs):
    """Apply the embedding stage; subclass hook."""
    raise NotImplementedError

def apply_main_layers(self, inputs, index):
    """Apply the index-th main transformer block; subclass hook."""
    raise NotImplementedError

def apply_final_layers(self, inputs):
    """Apply the output head; subclass hook."""
    raise NotImplementedError
def compute_attention_bias(self, inputs=None):
    """Define the attention bias (e.g. mask) used by each layer."""
    return self.attention_bias

def compute_position_bias(self, inputs=None):
    """Define the per-layer position bias
    (generally used for relative position encodings)."""
    return self.position_bias
def set_inputs(self, inputs, additional_input_layers=None):
"""设置input和inputs属性
"""
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
inputs = [inputs]
inputs = inputs[:]
if additional_input_layers is not None:
if not isinstance(additional_input_layers, list):
additional_input_layers = [additional_input_layers]
inputs.extend(additional_input_layers)
self.inputs = inputs
if len(inputs) > 1:
self.input = inputs
else:
self.input = inputs[0]
def set_outputs(self, outputs):
"""设置output和oututs属性
"""
if not isinstance(outputs, list):
outputs = [outputs]
outputs = outputs[:]
self.outputs = outputs
if len(outputs) > 1:
self.output = outputs
else:
self.output = outputs[0]
    @property
    def initializer(self):
        """Default weight initializer: truncated normal with stddev=0.02.
        """
        return keras.initializers.TruncatedNormal(stddev=0.02)
def simplify(self, inputs):
"""将list中的None过滤掉
"""
inputs = [i for i in inputs if i is not None]
if len(inputs) == 1:
inputs = inputs[0]
return inputs
def load_embeddings(self, embeddings):
"""处理Embedding层权重
"""
if self.keep_tokens is not None:
embeddings = embeddings[self.keep_tokens]
if self.compound_tokens is not None:
ext_embeddings = []
for item in self.compound_tokens:
if isinstance(item, list):
item = (item, [1] * len(item))
ext_embeddings.append(
np.average(embeddings[item[0]], 0, item[1])
)
embeddings = np.concatenate([embeddings, ext_embeddings], 0)
return embeddings
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
if isinstance(checkpoint, dict):
return checkpoint[name]
else:
return tf.train.load_variable(checkpoint, name)
    def create_variable(self, name, value, dtype=None):
        """Create a backend variable shaped like ``value``.

        Returns a ``(variable, value)`` pair: the freshly-initialized variable
        and the value that should eventually be assigned to it.
        """
        dtype = dtype or K.floatx()
        return K.variable(
            self.initializer(value.shape, dtype), dtype, name=name
        ), value
def variable_mapping(self):
"""构建keras层与checkpoint的变量名之间的映射表
"""
return {}
    def load_weights_from_checkpoint(self, checkpoint, mapping=None):
        """Load weights into built layers according to ``mapping``.

        ``mapping`` is ``{layer name: [checkpoint variable names]}`` and
        defaults to ``self.variable_mapping()``; entries whose layer was never
        built are silently dropped.  Variable order must match the layer's
        ``trainable_weights`` order.
        """
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        weight_value_pairs = []
        for layer, variables in mapping.items():
            layer = self.layers[layer]
            weights = layer.trainable_weights
            values = [self.load_variable(checkpoint, v) for v in variables]
            if isinstance(layer, MultiHeadAttention):
                """If key_size differs from head_size, project the Q/K weights
                (and biases) to the right shape via a random orthogonal matrix.
                """
                # Q and K kernels need projecting; with biases that's 4 tensors.
                count = 2
                if layer.use_bias:
                    count += 2
                heads = self.num_attention_heads
                head_size = self.attention_head_size
                key_size = self.attention_key_size
                # Orthonormal projection from head_size to key_size (QR of a
                # random Gaussian matrix).
                W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T
                if layer.attention_scale:
                    # Compensate the attention 1/sqrt(d) scaling difference.
                    W = W * key_size**0.25 / head_size**0.25
                for i in range(count):
                    w, v = weights[i], values[i]
                    w_shape, v_shape = K.int_shape(w), v.shape
                    if w_shape[-1] != v_shape[-1]:
                        pre_shape = w_shape[:-1]
                        v = v.reshape(pre_shape + (heads, head_size))
                        v = np.dot(v, W)
                        v = v.reshape(pre_shape + (heads * key_size,))
                        values[i] = v
            weight_value_pairs.extend(zip(weights, values))
        K.batch_set_value(weight_value_pairs)
    def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None):
        """Save the model's weights in TF checkpoint format according to ``mapping``.

        Uses a throwaway TF1 graph/session: variables are created, assigned the
        current layer values, and written with ``tf.train.Saver``.
        """
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        with tf.Graph().as_default():
            all_variables, all_values = [], []
            for layer, variables in mapping.items():
                layer = self.layers[layer]
                values = K.batch_get_value(layer.trainable_weights)
                for name, value in zip(variables, values):
                    variable, value = self.create_variable(name, value, dtype)
                    all_variables.append(variable)
                    all_values.append(value)
            with tf.Session() as sess:
                K.batch_set_value(zip(all_variables, all_values))
                saver = tf.train.Saver()
                saver.save(sess, filename)
class LM_Mask(object):
    """Lower-triangular (causal) attention mask, for language models.
    """
    def compute_attention_bias(self, inputs=None):
        """Derive the mask by comparing position-index sequences.
        """
        if self.attention_bias is None:
            def lm_mask(s):
                # A position may attend only to itself and earlier positions;
                # disallowed slots receive a large negative bias (-1e12).
                seq_len = K.shape(s)[1]
                idxs = K.arange(0, seq_len)
                mask = idxs[None, :] <= idxs[:, None]
                mask = K.cast(mask, K.floatx())
                return -(1 - mask[None, None]) * 1e12
            self.attention_bias = self.apply(
                inputs=self.inputs[0],
                layer=Lambda,
                function=lm_mask,
                name='Attention-LM-Mask'
            )
        return self.attention_bias
class UniLM_Mask(object):
    """UniLM attention mask (for Seq2Seq models).
    The source/target split is encoded by segment_ids.
    UniLM: https://arxiv.org/abs/1905.03197
    """
    def compute_attention_bias(self, inputs=None):
        """Derive the mask by comparing cumulative segment-id sequences.
        """
        if self.attention_bias is None:
            def unilm_mask(s):
                # cumsum of segment ids: source tokens attend bidirectionally,
                # target tokens attend causally; blocked slots get -1e12.
                idxs = K.cumsum(s, axis=1)
                mask = idxs[:, None, :] <= idxs[:, :, None]
                mask = K.cast(mask, K.floatx())
                return -(1 - mask[:, None]) * 1e12
            self.attention_bias = self.apply(
                inputs=self.inputs[1],
                layer=Lambda,
                function=unilm_mask,
                name='Attention-UniLM-Mask'
            )
        return self.attention_bias
class BERT(Transformer):
    """Build a BERT model.
    """
    def __init__(
        self,
        max_position,  # maximum sequence length
        segment_vocab_size=2,  # total number of segment ids
        with_pool=False,  # whether to include the Pooler part
        with_nsp=False,  # whether to include the NSP head
        with_mlm=False,  # whether to include the MLM head
        hierarchical_position=None,  # hierarchically decomposed position embeddings
        custom_position_ids=False,  # whether position ids are supplied by the caller
        shared_segment_embeddings=False,  # if True, segments share the token embedding
        **kwargs  # remaining arguments
    ):
        super(BERT, self).__init__(**kwargs)
        self.max_position = max_position
        self.segment_vocab_size = segment_vocab_size
        self.with_pool = with_pool
        self.with_nsp = with_nsp
        self.with_mlm = with_mlm
        self.hierarchical_position = hierarchical_position
        self.custom_position_ids = custom_position_ids
        self.shared_segment_embeddings = shared_segment_embeddings
        if self.with_nsp and not self.with_pool:
            # NSP reads the pooled [CLS] vector, so it implies the Pooler.
            self.with_pool = True
    def get_inputs(self):
        """BERT's inputs are token_ids and segment_ids.
        (Position ids may additionally be passed in for special needs.)
        """
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        inputs = [x_in]
        if self.segment_vocab_size > 0:
            s_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Segment'
            )
            inputs.append(s_in)
        if self.custom_position_ids:
            p_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Position'
            )
            inputs.append(p_in)
        return inputs
    def apply_embeddings(self, inputs):
        """BERT's embedding is the sum of token, position and segment embeddings.
        """
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        # z: optional condition tensor for conditional LayerNormalization.
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """BERT's body is a self-attention based block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Decide the outputs according to the remaining constructor flags.
        """
        x = inputs
        z = self.layer_norm_conds[0]
        outputs = [x]
        if self.with_pool:
            # Pooler: extract the first-token ([CLS]) vector.
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Lambda,
                function=lambda x: x[:, 0],
                name='Pooler'
            )
            # with_pool may be True (default 'tanh') or an activation name.
            pool_activation = 'tanh' if self.with_pool is True else self.with_pool
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=pool_activation,
                kernel_initializer=self.initializer,
                name='Pooler-Dense'
            )
            if self.with_nsp:
                # Next Sentence Prediction head.
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=2,
                    activation='softmax',
                    kernel_initializer=self.initializer,
                    name='NSP-Proba'
                )
            outputs.append(x)
        if self.with_mlm:
            # Masked Language Model head.
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.embedding_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='MLM-Dense'
            )
            x = self.apply(
                inputs=self.simplify([x, z]),
                layer=LayerNormalization,
                conditional=(z is not None),
                hidden_units=self.layer_norm_conds[1],
                hidden_activation=self.layer_norm_conds[2],
                hidden_initializer=self.initializer,
                name='MLM-Norm'
            )
            # Tied output projection: reuse the token embedding in 'dense' mode.
            x = self.apply(
                inputs=x,
                layer=Embedding,
                arguments={'mode': 'dense'},
                name='Embedding-Token'
            )
            x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias')
            # with_mlm may be True (default 'softmax') or an activation name.
            mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm
            x = self.apply(
                inputs=x,
                layer=Activation,
                activation=mlm_activation,
                name='MLM-Activation'
            )
            outputs.append(x)
        if len(outputs) == 1:
            outputs = outputs[0]
        elif len(outputs) == 2:
            outputs = outputs[1]
        else:
            outputs = outputs[1:]
        return outputs
    def load_variable(self, checkpoint, name):
        """Load a single variable, adapting it to this model where needed.
        """
        variable = super(BERT, self).load_variable(checkpoint, name)
        if name in [
            'bert/embeddings/word_embeddings',
            'cls/predictions/output_bias',
        ]:
            return self.load_embeddings(variable)
        elif name == 'cls/seq_relationship/output_weights':
            # Stored transposed relative to keras Dense kernels.
            return variable.T
        else:
            return variable
    def create_variable(self, name, value, dtype=None):
        """Create a variable in tensorflow (inverse of load_variable's transforms).
        """
        if name == 'cls/seq_relationship/output_weights':
            value = value.T
        return super(BERT, self).create_variable(name, value, dtype)
    def variable_mapping(self):
        """Map layer names to the official BERT checkpoint variable names.
        """
        mapping = {
            'Embedding-Token': ['bert/embeddings/word_embeddings'],
            'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],
            'Embedding-Position': ['bert/embeddings/position_embeddings'],
            'Embedding-Norm': [
                'bert/embeddings/LayerNorm/beta',
                'bert/embeddings/LayerNorm/gamma',
            ],
            'Embedding-Mapping': [
                'bert/encoder/embedding_hidden_mapping_in/kernel',
                'bert/encoder/embedding_hidden_mapping_in/bias',
            ],
            'Pooler-Dense': [
                'bert/pooler/dense/kernel',
                'bert/pooler/dense/bias',
            ],
            'NSP-Proba': [
                'cls/seq_relationship/output_weights',
                'cls/seq_relationship/output_bias',
            ],
            'MLM-Dense': [
                'cls/predictions/transform/dense/kernel',
                'cls/predictions/transform/dense/bias',
            ],
            'MLM-Norm': [
                'cls/predictions/transform/LayerNorm/beta',
                'cls/predictions/transform/LayerNorm/gamma',
            ],
            'MLM-Bias': ['cls/predictions/output_bias'],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'bert/encoder/layer_%d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention/self/query/kernel',
                    prefix + 'attention/self/query/bias',
                    prefix + 'attention/self/key/kernel',
                    prefix + 'attention/self/key/bias',
                    prefix + 'attention/self/value/kernel',
                    prefix + 'attention/self/value/bias',
                    prefix + 'attention/output/dense/kernel',
                    prefix + 'attention/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'attention/output/LayerNorm/beta',
                    prefix + 'attention/output/LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/dense/kernel',
                    prefix + 'intermediate/dense/bias',
                    prefix + 'output/dense/kernel',
                    prefix + 'output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'output/LayerNorm/beta',
                    prefix + 'output/LayerNorm/gamma',
                ],
            })
        return mapping
class ALBERT(BERT):
    """Build an ALBERT model (all layers share one set of weights).
    """
    def apply_main_layers(self, inputs, index):
        """ALBERT's body is a self-attention based block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        Layer names carry no index, so every call reuses the same layers.
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-MultiHeadSelfAttention'
        feed_forward_name = 'Transformer-FeedForward'
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def variable_mapping(self):
        """Map layer names to the official ALBERT checkpoint variable names.
        """
        mapping = super(ALBERT, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        mapping.update({
            'Transformer-MultiHeadSelfAttention': [
                prefix + 'attention_1/self/query/kernel',
                prefix + 'attention_1/self/query/bias',
                prefix + 'attention_1/self/key/kernel',
                prefix + 'attention_1/self/key/bias',
                prefix + 'attention_1/self/value/kernel',
                prefix + 'attention_1/self/value/bias',
                prefix + 'attention_1/output/dense/kernel',
                prefix + 'attention_1/output/dense/bias',
            ],
            'Transformer-MultiHeadSelfAttention-Norm': [
                prefix + 'LayerNorm/beta',
                prefix + 'LayerNorm/gamma',
            ],
            'Transformer-FeedForward': [
                prefix + 'ffn_1/intermediate/dense/kernel',
                prefix + 'ffn_1/intermediate/dense/bias',
                prefix + 'ffn_1/intermediate/output/dense/kernel',
                prefix + 'ffn_1/intermediate/output/dense/bias',
            ],
            'Transformer-FeedForward-Norm': [
                prefix + 'LayerNorm_1/beta',
                prefix + 'LayerNorm_1/gamma',
            ],
        })
        return mapping
class ALBERT_Unshared(BERT):
    """Untie ALBERT's sharing constraint and use it like a plain BERT:
    every layer gets its own keras layers, all loaded from the single
    shared group in the ALBERT checkpoint.
    """
    def variable_mapping(self):
        """Map every per-layer name to the one shared ALBERT weight group.
        """
        mapping = super(ALBERT_Unshared, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        for i in range(self.num_hidden_layers):
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention_1/self/query/kernel',
                    prefix + 'attention_1/self/query/bias',
                    prefix + 'attention_1/self/key/kernel',
                    prefix + 'attention_1/self/key/bias',
                    prefix + 'attention_1/self/value/kernel',
                    prefix + 'attention_1/self/value/bias',
                    prefix + 'attention_1/output/dense/kernel',
                    prefix + 'attention_1/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'LayerNorm/beta',
                    prefix + 'LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'ffn_1/intermediate/dense/kernel',
                    prefix + 'ffn_1/intermediate/dense/bias',
                    prefix + 'ffn_1/intermediate/output/dense/kernel',
                    prefix + 'ffn_1/intermediate/output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'LayerNorm_1/beta',
                    prefix + 'LayerNorm_1/gamma',
                ],
            })
        return mapping
class NEZHA(BERT):
    """Huawei's NEZHA model.
    Link: https://arxiv.org/abs/1909.00204
    """
    def apply_embeddings(self, inputs):
        """NEZHA's embedding is the sum of token and segment embeddings only
        (absolute position embeddings are replaced by relative ones in the body).
        """
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                # NOTE(review): input_dim is hard-coded to 2 here, ignoring
                # self.segment_vocab_size (cf. BERT.apply_embeddings) — confirm
                # this is intentional.
                input_dim=2,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """NEZHA's body is a self-attention based block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias(x)
        # Self Attention
        xi, x = x, [x, x, x, position_bias]
        arguments = {'a_bias': None, 'p_bias': 'typical_relative'}
        if attention_mask is not None:
            arguments['a_bias'] = True
            # Attention bias goes before the position bias in the input list.
            x.insert(3, attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def compute_position_bias(self, inputs=None):
        """Classic (sinusoidal, non-trainable) relative position encoding.
        """
        if self.position_bias is None:
            x = inputs
            self.position_bias = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbedding,
                # NOTE(review): maximum relative distance hard-coded to 64.
                input_dim=2 * 64 + 1,
                output_dim=self.attention_head_size,
                embeddings_initializer='Sinusoidal',
                name='Embedding-Relative-Position',
                trainable=False
            )
        return self.position_bias
class ELECTRA(BERT):
    """Google's ELECTRA model.
    Link: https://arxiv.org/abs/2003.10555
    """
    @insert_arguments(with_discriminator=False)
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(
        self,
        max_position,  # maximum sequence length
        **kwargs  # remaining arguments
    ):
        super(ELECTRA, self).__init__(max_position, **kwargs)
    def apply_final_layers(self, inputs):
        """Optionally append the replaced-token-detection discriminator head.
        """
        x = inputs
        if self.with_discriminator:
            # with_discriminator may be True (default 'sigmoid') or an
            # activation name.
            if self.with_discriminator is True:
                final_activation = 'sigmoid'
            else:
                final_activation = self.with_discriminator
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='Discriminator-Dense'
            )
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=1,
                activation=final_activation,
                kernel_initializer=self.initializer,
                name='Discriminator-Prediction'
            )
        return x
    def load_variable(self, checkpoint, name):
        """Load a single variable, adapting the embedding table.
        """
        variable = super(ELECTRA, self).load_variable(checkpoint, name)
        if name == 'electra/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """Map layer names to ELECTRA checkpoint variables (BERT names with the
        'bert/' prefix swapped for 'electra/', plus the discriminator head).
        """
        mapping = super(ELECTRA, self).variable_mapping()
        mapping['Embedding-Mapping'] = [
            'electra/embeddings_project/kernel',
            'electra/embeddings_project/bias',
        ]
        mapping = {
            k: [i.replace('bert/', 'electra/') for i in v]
            for k, v in mapping.items()
        }
        mapping['Discriminator-Dense'] = [
            'discriminator_predictions/dense/kernel',
            'discriminator_predictions/dense/bias',
        ]
        mapping['Discriminator-Prediction'] = [
            'discriminator_predictions/dense_1/kernel',
            'discriminator_predictions/dense_1/bias',
        ]
        return mapping
class GPT(LM_Mask, BERT):
    """Build a GPT model.
    Link: https://github.com/openai/finetune-transformer-lm
    """
    @insert_arguments(final_activation='softmax')
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(self, **kwargs):
        super(GPT, self).__init__(**kwargs)
    def apply_embeddings(self, inputs):
        """GPT's embedding is the sum of token, position and segment embeddings.
        Unlike BERT, no LayerNormalization is applied after the sum.
        """
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_final_layers(self, inputs):
        """Remaining output layers: tied LM head plus final activation.
        """
        x = inputs
        # Language Model head: reuse the token embedding in 'dense' mode.
        x = self.apply(
            inputs=x,
            layer=Embedding,
            arguments={'mode': 'dense'},
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Activation,
            activation=self.final_activation,
            name='LM-Activation'
        )
        return x
    def load_variable(self, checkpoint, name):
        """Load a single variable, adapting the embedding table.
        """
        variable = super(GPT, self).load_variable(checkpoint, name)
        if name == 'gpt/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """Map layer names to the TF GPT checkpoint variable names.
        """
        mapping = super(GPT, self).variable_mapping()
        mapping = {
            k: [
                i.replace('bert/', 'gpt/').replace('encoder', 'transformer')
                for i in v
            ]
            for k, v in mapping.items()
        }
        return mapping
class GPT2(GPT):
    """Build a GPT2 model.
    Link: https://github.com/openai/gpt-2
    """
    def get_inputs(self):
        """GPT2's only input is token_ids.
        """
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in
    def apply_embeddings(self, inputs):
        """GPT2's embedding is the sum of token and position embeddings.
        """
        x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """GPT2's body is a self-attention based block (pre-norm).
        Order: LN --> Att --> Add --> LN --> FFN --> Add
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask],
            layer=MultiHeadAttention,
            arguments={'a_bias': True},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Remaining output layers: final LN, dropout, then GPT's tied LM head.
        """
        x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Output-Dropout'
        )
        x = super(GPT2, self).apply_final_layers(x)
        return x
    def variable_mapping(self):
        """Map layer names to the TF GPT2 checkpoint variable names.
        """
        mapping = super(GPT2, self).variable_mapping()
        mapping = {
            k: [i.replace('output/LayerNorm', 'input/LayerNorm') for i in v]
            for k, v in mapping.items()
        }
        mapping['Output-Norm'] = [
            'gpt/output/LayerNorm/beta',
            'gpt/output/LayerNorm/gamma',
        ]
        return mapping
class GPT2_ML(GPT):
    """Build a GPT2_ML model.
    Link: https://github.com/imcaspar/gpt2-ml
    Note: although GPT2_ML calls itself GPT2, its structure is actually closer
    to GPT; it presumably claims the GPT2 name because the released version
    reaches GPT2's 1.5B parameter count.
    """
    def get_inputs(self):
        """GPT2_ML's only input is token_ids.
        """
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in
    def apply_embeddings(self, inputs):
        """GPT2_ML's embedding is the sum of token and position embeddings,
        followed by LayerNormalization.
        """
        x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """GPT2_ML's body is a self-attention based block.
        Order: Att --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x, attention_mask], {'a_bias': True}
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-0' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-1' % feed_forward_name
        )
        return x
    def load_variable(self, checkpoint, name):
        """Load a single variable, adapting the embedding table.
        """
        variable = super(GPT2_ML, self).load_variable(checkpoint, name)
        if name == 'newslm/embeddings/word_embed':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """Map layer names to the official GPT2_ML checkpoint variable names.
        """
        mapping = {
            'Embedding-Token': ['newslm/embeddings/word_embed'],
            'Embedding-Position': ['newslm/embeddings/pos_embed'],
            'Embedding-Norm': [
                'newslm/embeddings/LayerNorm_embed_norm/beta',
                'newslm/embeddings/LayerNorm_embed_norm/gamma',
            ],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'newslm/layer%02d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'query_layer/kernel',
                    prefix + 'query_layer/bias',
                    prefix + 'key_layer/kernel',
                    prefix + 'key_layer/bias',
                    prefix + 'value_layer/kernel',
                    prefix + 'value_layer/bias',
                    prefix + 'context_projection_layer/kernel',
                    prefix + 'context_projection_layer/bias',
                ],
                'Transformer-%d-FeedForward-Norm-0' % i: [
                    prefix + 'LayerNorm_mlp_ln0/beta',
                    prefix + 'LayerNorm_mlp_ln0/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/kernel',
                    prefix + 'intermediate/bias',
                    prefix + 'output/kernel',
                    prefix + 'output/bias',
                ],
                'Transformer-%d-FeedForward-Norm-1' % i: [
                    prefix + 'LayerNorm_mlp_ln1/beta',
                    prefix + 'LayerNorm_mlp_ln1/gamma',
                ],
            })
        return mapping
class T5_Base(Transformer):
    """Google's T5 model (base class).

    Note that T5 exists in two variants: the originally released version,
    referred to as t5.1.0, and a later upgraded release, t5.1.1 (the
    multilingual T5 released afterwards also uses the t5.1.1 structure).
    The two differ slightly in structure.
    t5.1.0: https://github.com/google-research/text-to-text-transfer-transformer
    t5.1.1: https://github.com/google-research/text-to-text-transfer-transformer/blob/master/released_checkpoints.md#t511
    multilingual-t5: https://github.com/google-research/multilingual-t5
    """
    @insert_arguments(version='t5.1.0')
    def __init__(self, **kwargs):
        super(T5_Base, self).__init__(**kwargs)
    def load_variable(self, checkpoint, name):
        """Load a single variable from the checkpoint.

        The shared embedding (and the tied, transposed LM head) is routed
        through ``load_embeddings``; relative-attention-bias tables are
        transposed into this library's layout (``create_variable`` applies
        the inverse transpose when writing back).
        """
        variable = super(T5_Base, self).load_variable(checkpoint, name)
        if name == 'shared/embedding':
            return self.load_embeddings(variable)
        elif name == 'decoder/logits/kernel':
            # LM head is the embedding table transposed, so filter it the
            # same way and transpose back.
            return self.load_embeddings(variable.T).T
        elif 'relative_attention_bias' in name:
            return variable.T
        else:
            return variable
    def create_variable(self, name, value, dtype=None):
        """Create a variable in TensorFlow.

        Inverse of the transpose applied in ``load_variable`` for the
        relative-attention-bias tables.
        """
        if 'relative_attention_bias' in name:
            value = value.T
        return super(T5_Base, self).create_variable(name, value, dtype)
    def variable_mapping(self):
        """Map layer names to the official T5 checkpoint variable names.

        For version t5.1.1 the mapping is rewritten below: final norms use
        ``rms_norm``, the LM head is untied (``decoder/logits/kernel``), and
        the feed-forward input kernel is split into ``wi_0``/``wi_1``.
        """
        mapping = {
            'Embedding-Token': ['shared/embedding'],
            'Encoder-Embedding-Relative-Position': [
                'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'
            ],
            'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],
            'Decoder-Embedding-Relative-Position': [
                'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',
            ],
            'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],
        }
        for i in range(self.num_hidden_layers):
            # Encoder body
            prefix = 'encoder/block_%03d/' % i
            mapping.update({
                'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Encoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_001/DenseReluDense/wi/kernel',
                    prefix + 'layer_001/DenseReluDense/wo/kernel',
                ],
                'Encoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
            })
            # Decoder body
            prefix = 'decoder/block_%03d/' % i
            mapping.update({
                'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [
                    prefix + 'layer_001/EncDecAttention/q',
                    prefix + 'layer_001/EncDecAttention/k',
                    prefix + 'layer_001/EncDecAttention/v',
                    prefix + 'layer_001/EncDecAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_002/DenseReluDense/wi/kernel',
                    prefix + 'layer_002/DenseReluDense/wo/kernel',
                ],
                'Decoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_002/layer_norm/scale',
                ],
            })
        if self.version == 't5.1.1':
            mapping['Encoder-Output-Norm'] = ['encoder/rms_norm/scale']
            mapping['Decoder-Output-Norm'] = ['decoder/rms_norm/scale']
            mapping['Decoder-Output-LM'] = ['decoder/logits/kernel']
            mapping = {
                k: [i.replace('layer_norm', 'rms_norm') for i in v]
                for k, v in mapping.items()
            }
            for i in range(self.num_hidden_layers):
                for layer in [
                    'Encoder-Transformer-%d-FeedForward' % i,
                    'Decoder-Transformer-%d-FeedForward' % i
                ]:
                    # 'wi/kernel' -> 'wi_0/kernel' and 'wi_1/kernel'
                    # (t5.1.1 uses gated-GELU with two input kernels).
                    mapping[layer] = [
                        mapping[layer][0][:-7] + '_0' + mapping[layer][0][-7:],
                        mapping[layer][0][:-7] + '_1' + mapping[layer][0][-7:],
                        mapping[layer][1]
                    ]
        return mapping
class T5_Encoder(T5_Base):
    """Google's T5 model (Encoder half)."""
    def get_inputs(self):
        """The T5 encoder's only input is token_ids."""
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Encoder-Input-Token'
        )
        return x_in
    def apply_embeddings(self, inputs):
        """T5 uses only a token embedding.

        The relative position embedding is prepared separately (in
        ``compute_position_bias``) for the attention layers to consume.
        """
        x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Embedding-Dropout'
        )
        # Project up to hidden_size when the embedding is factorized.
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Encoder-Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """The T5 encoder block is a self-attention module (pre-norm).

        Order: LN --> Att --> Add --> LN --> FFN --> Add
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index
        # NOTE(review): attention_mask is computed but not used in this
        # encoder block (the encoder is fully bidirectional).
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias(x)
        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, position_bias],
            layer=MultiHeadAttention,
            arguments={'p_bias': 't5_relative'},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Remaining output layers: final norm plus dropout."""
        x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Encoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Output-Dropout'
        )
        return x
    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding (bidirectional), computed once and
        cached in ``self.position_bias``."""
        if self.position_bias is None:
            x = inputs
            p = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=True,
                embeddings_initializer=self.initializer,
                name='Encoder-Embedding-Relative-Position'
            )
            self.position_bias = p
        return self.position_bias
class T5_Decoder(LM_Mask, T5_Base):
    """Google's T5 model (Decoder half).

    Attributes:
        with_lm: True for a softmax LM head; a string selects another
            activation for the head; falsy disables the head.
    """
    def __init__(self, with_lm=True, **kwargs):
        super(T5_Decoder, self).__init__(**kwargs)
        self.with_lm = with_lm
    def get_inputs(self):
        """The T5 decoder takes the encoder context sequence and token_ids."""
        c_in = self.apply(
            layer=Input,
            shape=(self.sequence_length, self.hidden_size),
            name='Input-Context'
        )
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Decoder-Input-Token'
        )
        return [c_in, x_in]
    def apply_embeddings(self, inputs):
        """T5 uses only a token embedding.

        The relative position embedding is prepared separately (in
        ``compute_position_bias``) for the attention layers to consume.
        """
        c, x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Embedding-Dropout'
        )
        # Project up to hidden_size when the embedding is factorized.
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Decoder-Embedding-Mapping'
            )
        return [c, x]
    def apply_main_layers(self, inputs, index):
        """The T5 decoder block combines self-attention and cross-attention.

        Order: LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN --> FFN --> Add
        """
        c, x = inputs
        z = self.layer_norm_conds[0]
        self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index
        cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index
        feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # position_bias[0] is for self-attention, position_bias[1] for
        # cross-attention (see compute_position_bias).
        position_bias = self.compute_position_bias([x, c])
        # Self Attention (causal: a_bias=True applies the LM mask)
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % self_attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask, position_bias[0]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': True,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=self_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % self_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name
        )
        # Cross Attention (no causal mask: a_bias=None)
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % cross_attention_name
        )
        x = self.apply(
            inputs=[x, c, c, position_bias[1]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': None,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=cross_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % cross_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return [c, x]
    def apply_final_layers(self, inputs):
        """Remaining output layers: final norm, dropout, 1/sqrt(d) scaling,
        and (optionally) the LM head."""
        c, x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Decoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Output-Dropout'
        )
        x = self.apply(
            inputs=x,
            layer=Lambda,
            function=lambda x: x / np.sqrt(self.hidden_size),
            mask=lambda i, m: m,
            name='Decoder-Output-Scale'
        )
        if self.with_lm:
            # Token-probability prediction head
            if self.embedding_size != self.hidden_size:
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.embedding_size,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-Mapping'
                )
            lm_activation = 'softmax' if self.with_lm is True else self.with_lm
            if self.version == 't5.1.0':
                # t5.1.0 ties the LM head to the token embedding.
                x = self.apply(
                    inputs=x,
                    layer=Embedding,
                    arguments={'mode': 'dense'},
                    name='Embedding-Token'
                )
                # NOTE(review): 'Dencoder' below looks like a typo for
                # 'Decoder'; left unchanged since layer names may be
                # referenced externally — confirm before renaming.
                x = self.apply(
                    inputs=x,
                    layer=Activation,
                    activation=lm_activation,
                    name='Dencoder-Output-LM-Activation'
                )
            else:
                # t5.1.1 uses an independent (untied) LM head.
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.vocab_size,
                    activation=lm_activation,
                    use_bias=False,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-LM'
                )
        return x
    def compute_attention_bias(self, inputs=None):
        """Adjust the LM mask's sequence length.

        The base LM_Mask derives its length from self.inputs[0], which here
        is the context input; temporarily expose self.inputs[1] (token ids)
        instead.
        """
        old_inputs = self.inputs[:]
        self.inputs = [old_inputs[1]]
        mask = super(T5_Decoder, self).compute_attention_bias(inputs)
        self.inputs = old_inputs
        return mask
    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding for the decoder (unidirectional).

        Returns a (self-attention bias, cross-attention bias) pair, cached
        in ``self.position_bias``.  Both apply() calls use the same layer
        name — presumably apply() de-duplicates by name so the two biases
        share one embedding table; confirm in Transformer.apply.
        """
        if self.position_bias is None:
            x, c = inputs
            p1 = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            p2 = self.apply(
                inputs=[x, c],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            self.position_bias = (p1, p2)
        return self.position_bias
class T5(T5_Base):
    """Google's T5 model (full Encoder-Decoder)."""
    def __init__(self, **kwargs):
        super(T5, self).__init__(**kwargs)
        # Share this instance's layer cache with both halves so layers
        # created under the same name (e.g. Embedding-Token) are reused.
        kwargs['layers'] = self.layers
        e_name, d_name = 'Encoder', 'Decoder'
        if 'name' in kwargs:
            e_name = '%s_%s' % (kwargs['name'], e_name)
            d_name = '%s_%s' % (kwargs['name'], d_name)
            del kwargs['name']  # avoid passing `name` twice below
        self._encoder = T5_Encoder(name=e_name, **kwargs)
        self._decoder = T5_Decoder(name=d_name, **kwargs)
    def build(self, **kwargs):
        """Build the encoder and the decoder, then join them into one model.

        The combined model's inputs are the encoder inputs plus the
        decoder's token input (the decoder's context input is fed from the
        encoder outputs).
        """
        self._encoder.build(**kwargs)
        self._decoder.build(**kwargs)
        self.encoder = self._encoder.model
        self.decoder = self._decoder.model
        self.inputs = self.encoder.inputs + self.decoder.inputs[1:]
        self.outputs = self.decoder(
            self.encoder.outputs + self.decoder.inputs[1:]
        )
        self.model = Model(self.inputs, self.outputs)
def extend_with_language_model(BaseModel):
    """Derive a model that adds the lower-triangular attention mask
    (for causal language modelling)."""
    class LanguageModel(LM_Mask, BaseModel):
        """Derived model carrying the lower-triangular attention mask."""
        def __init__(self, *args, **kwargs):
            super(LanguageModel, self).__init__(*args, **kwargs)
            # Force the MLM head on unless the caller already enabled one.
            if not self.with_mlm:
                self.with_mlm = True
    return LanguageModel
def extend_with_unified_language_model(BaseModel):
    """Derive a model that adds the UniLM attention mask (for Seq2Seq)."""
    class UnifiedLanguageModel(UniLM_Mask, BaseModel):
        """Derived model carrying the UniLM attention mask.

        UniLM: https://arxiv.org/abs/1905.03197
        """
        def __init__(self, *args, **kwargs):
            super(UnifiedLanguageModel, self).__init__(*args, **kwargs)
            # Force the MLM head on unless the caller already enabled one.
            if not self.with_mlm:
                self.with_mlm = True
    return UnifiedLanguageModel
def build_transformer_model(
    config_path=None,
    checkpoint_path=None,
    model='bert',
    application='encoder',
    return_keras_model=True,
    **kwargs
):
    """Build a transformer model from a config file and optionally load
    checkpoint weights.

    Args:
        config_path: path to a JSON config file (BERT-style keys such as
            max_position_embeddings are normalized below).
        checkpoint_path: if given, weights are loaded from this checkpoint.
        model: model name (key of the `models` table below) or a model class.
        application: 'encoder', 'lm' or 'unilm'.
        return_keras_model: return the underlying keras model instead of the
            Transformer wrapper.
        **kwargs: extra config entries; override values from the file.

    Returns:
        The keras model if `return_keras_model` is truthy, otherwise the
        Transformer instance.

    Raises:
        ValueError: when an 'lm'/'unilm' application is requested for a
            model that cannot support it ('electra', 't5').
    """
    configs = {}
    if config_path is not None:
        # Context manager ensures the config file handle is closed (the
        # original `json.load(open(...))` leaked it).
        with open(config_path) as fp:
            configs.update(json.load(fp))
    configs.update(kwargs)
    # Normalize BERT-style config keys to this library's names.
    if 'max_position' not in configs:
        configs['max_position'] = configs.get('max_position_embeddings', 512)
    if 'dropout_rate' not in configs:
        configs['dropout_rate'] = configs.get('hidden_dropout_prob')
    if 'segment_vocab_size' not in configs:
        configs['segment_vocab_size'] = configs.get('type_vocab_size', 2)
    models = {
        'bert': BERT,
        'albert': ALBERT,
        'albert_unshared': ALBERT_Unshared,
        'roberta': BERT,
        'nezha': NEZHA,
        'electra': ELECTRA,
        'gpt': GPT,
        'gpt2': GPT2,
        'gpt2_ml': GPT2_ML,
        't5': T5,
        't5_encoder': T5_Encoder,
        't5_decoder': T5_Decoder,
        't5.1.0': T5,
        't5.1.0_encoder': T5_Encoder,
        't5.1.0_decoder': T5_Decoder,
        't5.1.1': T5,
        't5.1.1_encoder': T5_Encoder,
        't5.1.1_decoder': T5_Decoder,
    }
    if is_string(model):
        model = model.lower()
        MODEL = models[model]
        # BUGFIX: the version check used to run unconditionally after this
        # branch, raising AttributeError when `model` was passed as a class
        # (classes have no .startswith).  It is string-only, so it lives here.
        if model.startswith('t5.1.1'):
            configs['version'] = 't5.1.1'
    else:
        MODEL = model
    application = application.lower()
    if application in ['lm', 'unilm'] and model in ['electra', 't5']:
        raise ValueError(
            '"%s" model can not be used as "%s" application.\n' %
            (model, application)
        )
    if application == 'lm':
        MODEL = extend_with_language_model(MODEL)
    elif application == 'unilm':
        MODEL = extend_with_unified_language_model(MODEL)
    transformer = MODEL(**configs)
    # The model is created here via the Transformer class's build().
    transformer.build(**configs)
    if checkpoint_path is not None:
        transformer.load_weights_from_checkpoint(checkpoint_path)
    if return_keras_model:
        return transformer.model
    else:
        return transformer
|
set_tolerance
|
See abstract method.
Args:
abs_tol (float): absolute tolerance. Reset if supplied, ignored if not.
rel_tol (float): relative tolerance. Reset if supplied, ignored if not.
|
from ._stopping_criterion import StoppingCriterion
from ..accumulate_data import LDTransformData
from ..util import MaxSamplesWarning, ParameterError, ParameterWarning
from numpy import *
from time import time
import warnings
class CubQMCLDG(StoppingCriterion):
    """
    Abstract class for CubQMC{LD}G where LD is a low discrepancy discrete distribution.
    See subclasses for implementation differences for each LD sequence.
    """
    def __init__(self, integrand, abs_tol, rel_tol, n_init, n_max, fudge, check_cone,
        control_variates, control_variate_means, update_beta, ptransform,
        coefv, allowed_levels, allowed_distribs, cast_complex):
        """Store tolerances, sampling bounds, transform parameters and the
        QMCPy objects (integrand / true measure / discrete distribution).

        n_init and n_max must be powers of 2 with n_init >= 2^8; invalid
        values fall back to 2^10 and 2^35 with a ParameterWarning.
        """
        self.parameters = ['abs_tol','rel_tol','n_init','n_max']
        # Input Checks
        self.abs_tol = float(abs_tol)
        self.rel_tol = float(rel_tol)
        m_min = log2(n_init)
        m_max = log2(n_max)
        # A non-integer log2 means n was not a power of 2.
        if m_min%1 != 0. or m_min < 8. or m_max%1 != 0.:
            # FIX: grammar in the user-facing warning ("must be a powers").
            warning_s = '''
        n_init and n_max must be powers of 2.
        n_init must be >= 2^8.
        Using n_init = 2^10 and n_max=2^35.'''
            warnings.warn(warning_s, ParameterWarning)
            m_min = 10.
            m_max = 35.
        self.n_init = 2.**m_min
        self.n_max = 2.**m_max
        self.m_min = m_min
        self.m_max = m_max
        self.fudge = fudge
        self.check_cone = check_cone
        self.coefv = coefv
        self.ptransform = ptransform
        self.cast_complex = cast_complex
        # QMCPy Objs
        self.integrand = integrand
        self.true_measure = self.integrand.true_measure
        self.discrete_distrib = self.integrand.discrete_distrib
        self.cv = control_variates
        self.cv_mu = control_variate_means
        self.ub = update_beta
        # Verify Compliant Construction
        super(CubQMCLDG,self).__init__(allowed_levels, allowed_distribs, allow_vectorized_integrals=False)
    def integrate(self):
        """ See abstract method.

        Doubles the sample size (m -> m+1) until the error bound satisfies
        the absolute/relative tolerances or n_max would be exceeded.
        """
        # Construct AccumulateData Object to House Integration data
        self.data = LDTransformData(self, self.integrand, self.true_measure, self.discrete_distrib,
            self.coefv, self.m_min, self.m_max, self.fudge, self.check_cone, ptransform=self.ptransform,
            cast_complex=self.cast_complex, control_variates=self.cv, control_variate_means=self.cv_mu, update_beta=self.ub)
        t_start = time()
        while True:
            self.data.update_data()
            # Error bound: fudge factor times the tail sum of coefficients.
            self.data.error_bound = self.data.fudge(self.data.m)*self.data.stilde
            # Compute optimal estimator within the [lb, ub] tolerance bands.
            ub = max(self.abs_tol, self.rel_tol*abs(self.data.solution + self.data.error_bound))
            lb = max(self.abs_tol, self.rel_tol*abs(self.data.solution - self.data.error_bound))
            self.data.solution = self.data.solution - self.data.error_bound*(ub-lb) / (ub+lb)
            if 4*self.data.error_bound**2./(ub+lb)**2. <= 1.:
                # stopping criterion met
                break
            elif self.data.m == self.data.m_max:
                # doubling samples would go over n_max
                # FIX: typo in the user-facing warning ("Alread").
                warning_s = """
        Already generated %d samples.
        Trying to generate %d new samples would exceed n_max = %d.
        No more samples will be generated.
        Note that error tolerances may no longer be satisfied""" \
                % (int(2**self.data.m), int(2**self.data.m), int(2**self.data.m_max))
                warnings.warn(warning_s, MaxSamplesWarning)
                break
            else:
                # double sample size
                self.data.m += 1.
        self.data.time_integrate = time() - t_start
        return self.data.solution, self.data
# MASKED: set_tolerance function (lines 85-94)
|
def set_tolerance(self, abs_tol=None, rel_tol=None):
    """
    See abstract method.
    Args:
        abs_tol (float): absolute tolerance. Reset if supplied, ignored if not.
        rel_tol (float): relative tolerance. Reset if supplied, ignored if not.
    """
    # Identity comparison with the None singleton is the idiomatic form and
    # cannot be fooled by objects overriding __eq__ (unlike `!= None`).
    if abs_tol is not None:
        self.abs_tol = abs_tol
    if rel_tol is not None:
        self.rel_tol = rel_tol
| 85
| 94
|
from ._stopping_criterion import StoppingCriterion
from ..accumulate_data import LDTransformData
from ..util import MaxSamplesWarning, ParameterError, ParameterWarning
from numpy import *
from time import time
import warnings
class CubQMCLDG(StoppingCriterion):
    """
    Abstract class for CubQMC{LD}G where LD is a low discrepancy discrete distribution.
    See subclasses for implementation differences for each LD sequence.
    """
    def __init__(self, integrand, abs_tol, rel_tol, n_init, n_max, fudge, check_cone,
        control_variates, control_variate_means, update_beta, ptransform,
        coefv, allowed_levels, allowed_distribs, cast_complex):
        """Store tolerances, sampling bounds, transform parameters and the
        QMCPy objects (integrand / true measure / discrete distribution).

        n_init and n_max must be powers of 2 with n_init >= 2^8; invalid
        values fall back to 2^10 and 2^35 with a ParameterWarning.
        """
        self.parameters = ['abs_tol','rel_tol','n_init','n_max']
        # Input Checks
        self.abs_tol = float(abs_tol)
        self.rel_tol = float(rel_tol)
        m_min = log2(n_init)
        m_max = log2(n_max)
        # A non-integer log2 means n was not a power of 2.
        if m_min%1 != 0. or m_min < 8. or m_max%1 != 0.:
            # FIX: grammar in the user-facing warning ("must be a powers").
            warning_s = '''
        n_init and n_max must be powers of 2.
        n_init must be >= 2^8.
        Using n_init = 2^10 and n_max=2^35.'''
            warnings.warn(warning_s, ParameterWarning)
            m_min = 10.
            m_max = 35.
        self.n_init = 2.**m_min
        self.n_max = 2.**m_max
        self.m_min = m_min
        self.m_max = m_max
        self.fudge = fudge
        self.check_cone = check_cone
        self.coefv = coefv
        self.ptransform = ptransform
        self.cast_complex = cast_complex
        # QMCPy Objs
        self.integrand = integrand
        self.true_measure = self.integrand.true_measure
        self.discrete_distrib = self.integrand.discrete_distrib
        self.cv = control_variates
        self.cv_mu = control_variate_means
        self.ub = update_beta
        # Verify Compliant Construction
        super(CubQMCLDG,self).__init__(allowed_levels, allowed_distribs, allow_vectorized_integrals=False)
    def integrate(self):
        """ See abstract method.

        Doubles the sample size (m -> m+1) until the error bound satisfies
        the absolute/relative tolerances or n_max would be exceeded.
        """
        # Construct AccumulateData Object to House Integration data
        self.data = LDTransformData(self, self.integrand, self.true_measure, self.discrete_distrib,
            self.coefv, self.m_min, self.m_max, self.fudge, self.check_cone, ptransform=self.ptransform,
            cast_complex=self.cast_complex, control_variates=self.cv, control_variate_means=self.cv_mu, update_beta=self.ub)
        t_start = time()
        while True:
            self.data.update_data()
            # Error bound: fudge factor times the tail sum of coefficients.
            self.data.error_bound = self.data.fudge(self.data.m)*self.data.stilde
            # Compute optimal estimator within the [lb, ub] tolerance bands.
            ub = max(self.abs_tol, self.rel_tol*abs(self.data.solution + self.data.error_bound))
            lb = max(self.abs_tol, self.rel_tol*abs(self.data.solution - self.data.error_bound))
            self.data.solution = self.data.solution - self.data.error_bound*(ub-lb) / (ub+lb)
            if 4*self.data.error_bound**2./(ub+lb)**2. <= 1.:
                # stopping criterion met
                break
            elif self.data.m == self.data.m_max:
                # doubling samples would go over n_max
                # FIX: typo in the user-facing warning ("Alread").
                warning_s = """
        Already generated %d samples.
        Trying to generate %d new samples would exceed n_max = %d.
        No more samples will be generated.
        Note that error tolerances may no longer be satisfied""" \
                % (int(2**self.data.m), int(2**self.data.m), int(2**self.data.m_max))
                warnings.warn(warning_s, MaxSamplesWarning)
                break
            else:
                # double sample size
                self.data.m += 1.
        self.data.time_integrate = time() - t_start
        return self.data.solution, self.data
    def set_tolerance(self, abs_tol=None, rel_tol=None):
        """
        See abstract method.
        Args:
            abs_tol (float): absolute tolerance. Reset if supplied, ignored if not.
            rel_tol (float): relative tolerance. Reset if supplied, ignored if not.
        """
        # Identity comparison with None is the idiomatic form (was `!= None`).
        if abs_tol is not None:
            self.abs_tol = abs_tol
        if rel_tol is not None:
            self.rel_tol = rel_tol
|
get_event_channel
|
Event Channel.
API Version: 2020-04-01-preview.
:param str event_channel_name: Name of the event channel.
:param str partner_namespace_name: Name of the partner namespace.
:param str resource_group_name: The name of the resource group within the user's subscription.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetEventChannelResult',
'AwaitableGetEventChannelResult',
'get_event_channel',
]
@pulumi.output_type
class GetEventChannelResult:
    """
    Event Channel.

    NOTE: this class is part of a generated Pulumi SDK file; edits here
    will be overwritten on the next code generation run.
    """
    def __init__(__self__, destination=None, expiration_time_if_not_activated_utc=None, filter=None, id=None, name=None, partner_topic_friendly_description=None, partner_topic_readiness_state=None, provisioning_state=None, source=None, type=None):
        # Each argument is type-checked (when truthy) before being stored
        # via pulumi.set, so @pulumi.getter properties can retrieve it.
        if destination and not isinstance(destination, dict):
            raise TypeError("Expected argument 'destination' to be a dict")
        pulumi.set(__self__, "destination", destination)
        if expiration_time_if_not_activated_utc and not isinstance(expiration_time_if_not_activated_utc, str):
            raise TypeError("Expected argument 'expiration_time_if_not_activated_utc' to be a str")
        pulumi.set(__self__, "expiration_time_if_not_activated_utc", expiration_time_if_not_activated_utc)
        if filter and not isinstance(filter, dict):
            raise TypeError("Expected argument 'filter' to be a dict")
        pulumi.set(__self__, "filter", filter)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if partner_topic_friendly_description and not isinstance(partner_topic_friendly_description, str):
            raise TypeError("Expected argument 'partner_topic_friendly_description' to be a str")
        pulumi.set(__self__, "partner_topic_friendly_description", partner_topic_friendly_description)
        if partner_topic_readiness_state and not isinstance(partner_topic_readiness_state, str):
            raise TypeError("Expected argument 'partner_topic_readiness_state' to be a str")
        pulumi.set(__self__, "partner_topic_readiness_state", partner_topic_readiness_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if source and not isinstance(source, dict):
            raise TypeError("Expected argument 'source' to be a dict")
        pulumi.set(__self__, "source", source)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def destination(self) -> Optional['outputs.EventChannelDestinationResponse']:
        """
        Represents the destination of an event channel.
        """
        return pulumi.get(self, "destination")
    @property
    @pulumi.getter(name="expirationTimeIfNotActivatedUtc")
    def expiration_time_if_not_activated_utc(self) -> Optional[str]:
        """
        Expiration time of the event channel. If this timer expires while the corresponding partner topic is never activated,
        the event channel and corresponding partner topic are deleted.
        """
        return pulumi.get(self, "expiration_time_if_not_activated_utc")
    @property
    @pulumi.getter
    def filter(self) -> Optional['outputs.EventChannelFilterResponse']:
        """
        Information about the filter for the event channel.
        """
        return pulumi.get(self, "filter")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified identifier of the resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="partnerTopicFriendlyDescription")
    def partner_topic_friendly_description(self) -> Optional[str]:
        """
        Friendly description about the topic. This can be set by the publisher/partner to show custom description for the customer partner topic.
        This will be helpful to remove any ambiguity of the origin of creation of the partner topic for the customer.
        """
        return pulumi.get(self, "partner_topic_friendly_description")
    @property
    @pulumi.getter(name="partnerTopicReadinessState")
    def partner_topic_readiness_state(self) -> str:
        """
        The readiness state of the corresponding partner topic.
        """
        return pulumi.get(self, "partner_topic_readiness_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the event channel.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def source(self) -> Optional['outputs.EventChannelSourceResponse']:
        """
        Source of the event channel. This represents a unique resource in the partner's resource model.
        """
        return pulumi.get(self, "source")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the resource
        """
        return pulumi.get(self, "type")
class AwaitableGetEventChannelResult(GetEventChannelResult):
    """Awaitable wrapper around GetEventChannelResult.

    ``__await__`` makes the already-resolved result usable with ``await``:
    the ``if False: yield`` makes the method a generator that never yields
    and immediately returns a plain GetEventChannelResult.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetEventChannelResult(
            destination=self.destination,
            expiration_time_if_not_activated_utc=self.expiration_time_if_not_activated_utc,
            filter=self.filter,
            id=self.id,
            name=self.name,
            partner_topic_friendly_description=self.partner_topic_friendly_description,
            partner_topic_readiness_state=self.partner_topic_readiness_state,
            provisioning_state=self.provisioning_state,
            source=self.source,
            type=self.type)
# MASKED: get_event_channel function (lines 156-189)
|
def get_event_channel(event_channel_name: Optional[str] = None,
                      partner_namespace_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventChannelResult:
    """
    Event Channel.
    API Version: 2020-04-01-preview.

    NOTE: this function is part of a generated Pulumi SDK file; edits here
    will be overwritten on the next code generation run.

    :param str event_channel_name: Name of the event channel.
    :param str partner_namespace_name: Name of the partner namespace.
    :param str resource_group_name: The name of the resource group within the user's subscription.
    :param opts: invoke options; a version is defaulted below when unset.
    """
    __args__ = dict()
    __args__['eventChannelName'] = event_channel_name
    __args__['partnerNamespaceName'] = partner_namespace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # presumably the SDK package version — confirm in _utilities.
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the typed result is unwrapped via .value.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid:getEventChannel', __args__, opts=opts, typ=GetEventChannelResult).value
    return AwaitableGetEventChannelResult(
        destination=__ret__.destination,
        expiration_time_if_not_activated_utc=__ret__.expiration_time_if_not_activated_utc,
        filter=__ret__.filter,
        id=__ret__.id,
        name=__ret__.name,
        partner_topic_friendly_description=__ret__.partner_topic_friendly_description,
        partner_topic_readiness_state=__ret__.partner_topic_readiness_state,
        provisioning_state=__ret__.provisioning_state,
        source=__ret__.source,
        type=__ret__.type)
| 156
| 189
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetEventChannelResult',
'AwaitableGetEventChannelResult',
'get_event_channel',
]
@pulumi.output_type
class GetEventChannelResult:
    """
    Event Channel.
    """
    def __init__(__self__, destination=None, expiration_time_if_not_activated_utc=None, filter=None, id=None, name=None, partner_topic_friendly_description=None, partner_topic_readiness_state=None, provisioning_state=None, source=None, type=None):
        # Validate and store each field in declaration order. The TypeError
        # text matches the generated-SDK convention exactly.
        _values = (
            ('destination', destination, dict),
            ('expiration_time_if_not_activated_utc', expiration_time_if_not_activated_utc, str),
            ('filter', filter, dict),
            ('id', id, str),
            ('name', name, str),
            ('partner_topic_friendly_description', partner_topic_friendly_description, str),
            ('partner_topic_readiness_state', partner_topic_readiness_state, str),
            ('provisioning_state', provisioning_state, str),
            ('source', source, dict),
            ('type', type, str),
        )
        for _key, _value, _expected in _values:
            if _value and not isinstance(_value, _expected):
                raise TypeError("Expected argument '%s' to be a %s" % (_key, _expected.__name__))
            pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter
    def destination(self) -> Optional['outputs.EventChannelDestinationResponse']:
        """
        Represents the destination of an event channel.
        """
        return pulumi.get(self, "destination")

    @property
    @pulumi.getter(name="expirationTimeIfNotActivatedUtc")
    def expiration_time_if_not_activated_utc(self) -> Optional[str]:
        """
        Expiration time of the event channel. If this timer expires while the corresponding partner topic is never activated,
        the event channel and corresponding partner topic are deleted.
        """
        return pulumi.get(self, "expiration_time_if_not_activated_utc")

    @property
    @pulumi.getter
    def filter(self) -> Optional['outputs.EventChannelFilterResponse']:
        """
        Information about the filter for the event channel.
        """
        return pulumi.get(self, "filter")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified identifier of the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="partnerTopicFriendlyDescription")
    def partner_topic_friendly_description(self) -> Optional[str]:
        """
        Friendly description about the topic. This can be set by the publisher/partner to show custom description for the customer partner topic.
        This will be helpful to remove any ambiguity of the origin of creation of the partner topic for the customer.
        """
        return pulumi.get(self, "partner_topic_friendly_description")

    @property
    @pulumi.getter(name="partnerTopicReadinessState")
    def partner_topic_readiness_state(self) -> str:
        """
        The readiness state of the corresponding partner topic.
        """
        return pulumi.get(self, "partner_topic_readiness_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the event channel.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def source(self) -> Optional['outputs.EventChannelSourceResponse']:
        """
        Source of the event channel. This represents a unique resource in the partner's resource model.
        """
        return pulumi.get(self, "source")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the resource
        """
        return pulumi.get(self, "type")
class AwaitableGetEventChannelResult(GetEventChannelResult):
    """Awaitable wrapper around GetEventChannelResult; the value is always ready."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator so the
        # instance satisfies the awaitable protocol without ever suspending.
        if False:
            yield self
        _fields = ('destination', 'expiration_time_if_not_activated_utc', 'filter',
                   'id', 'name', 'partner_topic_friendly_description',
                   'partner_topic_readiness_state', 'provisioning_state',
                   'source', 'type')
        return GetEventChannelResult(**{name: getattr(self, name) for name in _fields})
def get_event_channel(event_channel_name: Optional[str] = None,
                      partner_namespace_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventChannelResult:
    """
    Event Channel.
    API Version: 2020-04-01-preview.


    :param str event_channel_name: Name of the event channel.
    :param str partner_namespace_name: Name of the partner namespace.
    :param str resource_group_name: The name of the resource group within the user's subscription.
    """
    # Map the Python-style parameters onto the provider's wire-format names.
    invoke_args = {
        'eventChannelName': event_channel_name,
        'partnerNamespaceName': partner_namespace_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('azure-nextgen:eventgrid:getEventChannel', invoke_args, opts=opts, typ=GetEventChannelResult).value
    # Re-package the plain result as its awaitable counterpart, field by field.
    field_names = ('destination', 'expiration_time_if_not_activated_utc', 'filter',
                   'id', 'name', 'partner_topic_friendly_description',
                   'partner_topic_readiness_state', 'provisioning_state',
                   'source', 'type')
    return AwaitableGetEventChannelResult(**{name: getattr(raw, name) for name in field_names})
|
trim
|
Trims all runs of the Evaluator to the length specified by the threshold value t.
@param t: Threshold parameter or number of top-k documents to be considered.
@param run: If run is not None, only the provided run will be trimmed.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.

    All constructor arguments are optional file paths; every file that is provided is
    parsed with pytrec_eval, and runs are stored with their topics in sorted order.
    """
    def __init__(self, **kwargs):
        # Paths to the original qrels and the four runs
        # (baseline/advanced x original/reproduced-or-replicated).
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs and their per-topic scores; filled lazily below / by evaluate().
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
            self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                # Keep topics in sorted order for deterministic iteration.
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}

    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.

        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        # NOTE: `trim` below is repro_eval.util.trim (imported at module level),
        # which trims a run in place; the method name shadows it only in the
        # class namespace, not inside this body.
        if run is not None:
            if t is not None:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        if self.run_b_orig:
            if t is not None:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            if t is not None:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            if t is not None:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            if t is not None:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)

    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.

        @param run: Reproduced or replicated run that will be evaluated.
        """
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)

    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another baseline run; takes precedence over run_b_score.
        @param run_a_path: Path to another advanced run; takes precedence over run_a_score.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Replicated runs (RplEvaluator) are scored against the target
                # collection's qrels; reproduced runs against the original qrels.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another baseline run; takes precedence over run_b_score.
        @param run_a_path: Path to another advanced run; takes precedence over run_a_score.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param rpd: Boolean indicating if the evaluated runs are reproduced.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.
    """
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.

        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        super(RpdEvaluator, self).evaluate()
        # Reproduced runs are scored against the same (original) qrels.
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)

    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param run_b_rep: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
        """
        # Precedence: explicit paths > explicit runs > runs stored on the object.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                            'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
        else:
            print(ERR_MSG)

    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param run_b_rep: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
                        See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            print(ERR_MSG)

    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Root Mean Square Error (RMSE) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)

    def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the normalized Root Mean Square Error (RMSE).

        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with nRMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    # Fixed: this branch previously omitted "normalized",
                    # inconsistent with every other nrmse feedback message.
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
        as in the original experiment.

        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """
    def __init__(self, **kwargs):
        # In the replicability setting the replicated runs are judged against a
        # *different* test collection, supplied via 'qrel_rpl_path'.
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                # Second relevance evaluator dedicated to the replicated runs.
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
        be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs via the base class, then score the replicated
        # runs against the replicability qrels (rel_eval_rpl).
        super(RplEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
        if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
        if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if run_b_path:
            if run_a_path:
                # Both run files given: parse, sort by topic, score, then test.
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        # rpd=False selects the unpaired variant of the underlying t-test.
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
def trim(self, t=None, run=None):
"""
Trims all runs of the Evaluator to the length specified by the threshold value t.
@param t: Threshold parameter or number of top-k documents to be considered.
@param run: If run is not None, only the provided run will be trimmed.
"""
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
| 56
| 97
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.
    """
    def __init__(self, **kwargs):
        # All inputs are optional file paths; parsed runs/scores stay None until
        # the corresponding path is given and evaluate() has been called.
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs (topic -> {doc_id: score}).
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        # Per-topic measure scores as returned by pytrec_eval.
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        # Runs are re-keyed in sorted topic order so that downstream per-topic
        # comparisons iterate both runs in the same order.
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.
        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            # Tie-breaking is applied first so the cut-off is deterministic.
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            if t:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            if t:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            if t:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            if t:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)
    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.
        @param run: Reproduced or replicated run that will be evaluated.
                    NOTE: unused in this base implementation; subclasses
                    (RpdEvaluator/RplEvaluator) handle it.
        """
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the Evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the Evaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run file; takes
        precedence over run_b_score when given together with run_a_path.
        @param run_a_path: Path to another reproduced/replicated advanced run file.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        # Branch precedence: run files > explicit score dicts > stored scores.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Use the replicability evaluator when this is an RplEvaluator.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)
    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the Evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the Evaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run file; takes
        precedence over run_b_score when given together with run_a_path.
        @param run_a_path: Path to another reproduced/replicated advanced run file.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        # Branch precedence mirrors er(): run files > score dicts > stored scores.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)
    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param rpd: Boolean indicating if the evaluated runs are reproduced (paired test).
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the Evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the Evaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Requires original baseline scores plus either stored or supplied
        # reproduced/replicated baseline scores.
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.
    """
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
        be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        # Score the originals via the base class, then the reproduced runs with
        # the same (original) qrels — reproduction reuses the test collection.
        super(RpdEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: A reproduced baseline run (parsed),
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_rep: A reproduced advanced run (parsed),
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
        """
        # Branch precedence: run files > supplied run dicts > stored runs.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                            'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: A reproduced baseline run (parsed),
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_rep: A reproduced advanced run (parsed),
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
        See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        # Same branch precedence as ktau_union.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            print(ERR_MSG)
    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Root Mean Square Error (RMSE) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values that measure the closeness
        between the topics scores of the original and reproduced runs.
        """
        # Branch precedence: run files > supplied score dicts > stored scores.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the normalized Root Mean Square Error (RMSE).
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with nRMSE values that measure the closeness
        between the topics scores of the original and reproduced runs.
        """
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    # NOTE(review): message omits "normalized", unlike the sibling
                    # branches — likely a copy-paste slip; left unchanged here.
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
        as in the original experiment.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        # Default rpd=True in _ttest selects the paired variant of the t-test.
        return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """
    def __init__(self, **kwargs):
        # In the replicability setting the replicated runs are judged against a
        # *different* test collection, supplied via 'qrel_rpl_path'.
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                # Second relevance evaluator dedicated to the replicated runs.
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
        be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs via the base class, then score the replicated
        # runs against the replicability qrels (rel_eval_rpl).
        super(RplEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
        if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
        if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if run_b_path:
            if run_a_path:
                # Both run files given: parse, sort by topic, score, then test.
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        # rpd=False selects the unpaired variant of the underlying t-test.
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
er
|
Determines the Effect Ratio (ER) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The ER value is determined by the ratio between the mean improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the ER values for the specified run combination.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
"""
An abstract evaluator that holds the original baseline and advanced run as well as
the reproduced/replicated baseline and advanced run.
"""
def __init__(self, **kwargs):
    """
    Initialize the evaluator from keyword arguments.

    Recognized (all optional):
    qrel_orig_path: Path to the original qrels file.
    run_b_orig_path / run_a_orig_path: Paths to the original baseline/advanced runs.
    run_b_rep_path / run_a_rep_path: Paths to the reproduced or replicated runs.

    Runs are parsed with pytrec_eval and stored with their topics in sorted
    order; per-topic score dicts stay None until evaluate() is called.
    """
    self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
    self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
    self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
    self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
    self.run_a_rep_path = kwargs.get('run_a_rep_path', None)

    # Per-topic score dicts, populated by evaluate().
    self.run_b_orig_score = None
    self.run_a_orig_score = None
    self.run_b_rep_score = None
    self.run_a_rep_score = None

    if self.qrel_orig_path:
        with open(self.qrel_orig_path, 'r') as f_qrel:
            qrel_orig = pytrec_eval.parse_qrel(f_qrel)
        self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)

    def _load_run(path):
        # Parse a TREC run file and normalize it to a dict with sorted topic keys.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        return {t: parsed[t] for t in sorted(parsed)}

    # One helper replaces four previously duplicated parse-and-sort stanzas.
    self.run_b_orig = _load_run(self.run_b_orig_path) if self.run_b_orig_path else None
    self.run_a_orig = _load_run(self.run_a_orig_path) if self.run_a_orig_path else None
    self.run_b_rep = _load_run(self.run_b_rep_path) if self.run_b_rep_path else None
    self.run_a_rep = _load_run(self.run_a_rep_path) if self.run_a_rep_path else None
def trim(self, t=None, run=None):
    """
    Trim runs to the top-k documents given by the threshold t.

    @param t: Threshold parameter, i.e. the number of top-k documents to keep.
              When falsy, trim() is called with its default depth.
    @param run: When given, only this run is trimmed (after tie-breaking);
                otherwise every run held by the Evaluator is processed.
    """
    def _cut(r):
        # Break score ties first so the cut-off is deterministic, then trim.
        r = break_ties(r)
        if t:
            trim(r, thresh=t)
        else:
            trim(r)
        return r

    if run:
        _cut(run)
        return
    for run_attr in ('run_b_orig', 'run_a_orig', 'run_b_rep', 'run_a_rep'):
        current = getattr(self, run_attr)
        if current:
            setattr(self, run_attr, _cut(current))
def evaluate(self, run=None):
    """
    Evaluate the original baseline and advanced runs if they were loaded.

    @param run: Accepted for interface compatibility with the subclasses;
                the base implementation only scores the original runs.
    """
    for run_attr, score_attr in (('run_b_orig', 'run_b_orig_score'),
                                 ('run_a_orig', 'run_a_orig_score')):
        original = getattr(self, run_attr)
        if original:
            original = break_ties(original)
            setattr(self, run_attr, original)
            setattr(self, score_attr, self.rel_eval.evaluate(original))
# MASKED: er function (lines 112-151)
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The DeltaRI value is determined by the difference between the relative improvements
    of the original and reproduced/replicated experiments.
    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to a baseline run file to load and score instead.
    @param run_a_path: Path to an advanced run file to load and score instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the DRI values for the specified run combination.
    """
    if print_feedback:
        print('Determining Delta Relative Improvement (DRI)')
    # Precedence: explicit file paths > explicit score dicts > cached scores.
    # All branches require the original baseline AND advanced scores, i.e.
    # evaluate() must have been called first.
    if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
            # RplEvaluator scores with the replication qrels; RpdEvaluator
            # (no rel_eval_rpl attribute) falls back to the original qrels.
            run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
            run_a_rep = pytrec_eval.parse_run(a_run)
            run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
            run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    else:
        # No usable score combination was found; returns None after printing.
        print(ERR_MSG)
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
    """
    Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    @param rpd: Boolean indicating if the evaluated runs are reproduced.
    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    # Guard: original baseline scores must exist, plus replicated baseline
    # scores from either the cached attribute or the explicit argument.
    if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
        # Explicit arguments take precedence over cached scores.
        if run_b_score and run_a_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
        if run_b_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
        # Fall back to the scores cached by evaluate().
        # NOTE(review): this branch reads self.run_b_rep_score; the guard above
        # only guarantees it when run_b_score was not given — confirm callers
        # never pass run_b_score without run_a_score while run_b_rep_score is None.
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
    else:
        # Required scores missing; print the shared error message and return None.
        print(ERR_MSG)
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
def evaluate(self, run=None):
    """
    Evaluate the original and reproduced baseline/advanced runs.

    If a (reproduced) run is passed in, only that run is scored against the
    original qrels and its score dictionary is returned; object state is not
    modified.

    @param run: A reproduced run. If omitted, the runs held by this
                RpdEvaluator are evaluated in place instead.
    @return: A score dictionary when run is given, otherwise None.
    """
    if run:
        return self.rel_eval.evaluate(run)
    # Score the original runs via the base class, ...
    super(RpdEvaluator, self).evaluate()
    # ... then the reproduced runs against the same (original) qrels.
    for run_attr in ('run_b_rep', 'run_a_rep'):
        rep_run = getattr(self, run_attr)
        if rep_run:
            rep_run = break_ties(rep_run)
            setattr(self, run_attr, rep_run)
            setattr(self, run_attr + '_score', self.rel_eval.evaluate(rep_run))
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    @param run_b_rep: A reproduced baseline run (parsed dict),
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_rep: A reproduced advanced run (parsed dict),
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
    """
    # Precedence: file paths > run dicts passed as arguments > cached runs.
    # Each tier compares both runs when the advanced input is available,
    # otherwise only the baseline.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
    else:
        # No original/reproduced run combination available.
        print(ERR_MSG)
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
    """
    Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    @param run_b_rep: A reproduced baseline run (parsed dict),
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_rep: A reproduced advanced run (parsed dict),
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
    See also: https://github.com/claclark/Compatibility
    @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
    """
    # Same three-tier precedence as ktau_union: paths > argument dicts > cached runs.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    else:
        # No original/reproduced run combination available.
        print(ERR_MSG)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Root Mean Square Error (RMSE) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with RMSE values that measure the closeness
    between the topics scores of the original and reproduced runs.
    """
    # Precedence: paths > score dicts > cached scores. The path branches read
    # self.run_b_orig_score/run_a_orig_score, so evaluate() must have run first.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        # No usable score combination was found.
        print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the normalized Root Mean Square Error (nRMSE).
    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with nRMSE values that measure the closeness
    between the topics scores of the original and reproduced runs.
    """
    # Precedence: paths > score dicts > cached scores (mirrors rmse()).
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                # Fixed: this message previously omitted "normalized",
                # inconsistent with every other branch of this method.
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        # No usable score combination was found.
        print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Run a paired two-tailed t-test for reproduced runs derived from the same
    test collection as the original experiment.

    @param run_b_score: Scores of the baseline run; falls back to the scores
                        stored on this evaluator when omitted.
    @param run_a_score: Scores of the advanced run; falls back to the scores
                        stored on this evaluator when omitted.
    @param run_b_path: Path to another reproduced baseline run to load and score.
    @param run_a_path: Path to another reproduced advanced run to load and score.
    @param print_feedback: Print progress feedback when True.
    @return: Dictionary with p-values comparing the baseline/advanced score
             distributions.
    """
    def _score_run_file(path):
        # Parse a run file, sort its topics, and score it with the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        ordered = {topic: parsed[topic] for topic in sorted(parsed)}
        return self.rel_eval.evaluate(ordered)

    if run_b_path:
        baseline_score = _score_run_file(run_b_path)
        advanced_score = _score_run_file(run_a_path) if run_a_path else None
        return self._ttest(run_b_score=baseline_score, run_a_score=advanced_score, print_feedback=print_feedback)
    return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
"""
The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
derived from a test collection not used in the original experiment.
"""
def __init__(self, **kwargs):
    """
    Set up the Replicability Evaluator.

    Accepts all base Evaluator keyword arguments plus 'qrel_rpl_path', the
    qrels of the replication test collection; when given, a dedicated
    RelevanceEvaluator is created for scoring replicated runs.
    """
    super(RplEvaluator, self).__init__(**kwargs)
    self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
    if self.qrel_rpl_path:
        with open(self.qrel_rpl_path, 'r') as f_qrel:
            qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
            # Replicated runs are judged with their own (replication) qrels.
            self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
def evaluate(self, run=None):
    """
    Evaluates the scores of the original and replicated baseline and advanced runs.
    If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
    scores is returned.
    @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
    be used instead.
    @return: If run is specified, a dictionary with the corresponding scores is returned.
    """
    if run:
        # One-off evaluation against the replication qrels; object state untouched.
        return self.rel_eval_rpl.evaluate(run)
    # Original runs are scored with the original qrels by the base class.
    super(RplEvaluator, self).evaluate()
    # Replicated runs are scored with the replication qrels (rel_eval_rpl).
    if self.run_b_rep:
        self.run_b_rep = break_ties(self.run_b_rep)
        self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
    if self.run_a_rep:
        self.run_a_rep = break_ties(self.run_a_rep)
        self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
    not used in the original experiment.
    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RplEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RplEvaluator object will be used instead.
    @param run_b_path: Path to another replicated baseline run,
    if not provided the replicated baseline run of the RplEvaluator object will be used instead.
    @param run_a_path: Path to another replicated advanced run,
    if not provided the replicated advanced run of the RplEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    # File paths take precedence over pre-computed score dicts.
    if run_b_path:
        if run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                # Parse, sort topics, and score each run with the replication qrels.
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
                return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
        else:
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
    # rpd=False selects the un-paired variant in the shared helper.
    return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Effect Ratio (ER) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The ER value is determined by the ratio between the mean improvements
    of the original and reproduced/replicated experiments.
    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to a baseline run file to load and score instead.
    @param run_a_path: Path to an advanced run file to load and score instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the ER values for the specified run combination.
    """
    if print_feedback:
        print('Determining Effect Ratio (ER)')
    # Precedence: file paths > explicit score dicts > cached scores. All
    # branches require original baseline AND advanced scores (evaluate() first).
    if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
            # RplEvaluator scores with the replication qrels; RpdEvaluator
            # (no rel_eval_rpl attribute) falls back to the original qrels.
            run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
            run_a_rep = pytrec_eval.parse_run(a_run)
            run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
            run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
        return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                  rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
        return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                  rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    else:
        # No usable score combination was found; returns None after printing.
        print(ERR_MSG)
| 112
| 151
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.

    Subclasses (reproducibility/replicability evaluators) are expected to provide
    the concrete ``evaluate``/``ttest`` behavior; this base class loads the run and
    qrels files and implements the overall-effect measures (ER, DRI).
    """
    def __init__(self, **kwargs):
        # All paths are optional; a missing path simply leaves the corresponding
        # run (and later its scores) as None, and the measure methods below fall
        # back to ERR_MSG when required inputs are absent.
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs (topic -> {doc_id: score}) and their per-topic evaluation
        # results; the *_score attributes are filled by evaluate() in subclasses.
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                # Evaluator against the ORIGINAL qrels, over all measures
                # pytrec_eval supports.
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                # Rebuild the dict with topics in sorted order so iteration over
                # topics is deterministic across runs.
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.
        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            # Only the caller-supplied run is trimmed (in place via trim());
            # the evaluator's own runs are left untouched.
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        # Otherwise trim every loaded run; break_ties() is applied first so the
        # cut-off at rank t is deterministic for tied scores.
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            if t:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            if t:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            if t:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            if t:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)
    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.
        @param run: Reproduced or replicated run that will be evaluated.
            NOTE(review): this base implementation ignores the parameter;
            subclasses override evaluate() to score a provided run.
        """
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run file,
                           takes precedence over run_b_score and the stored scores.
        @param run_a_path: Path to another reproduced/replicated advanced run file,
                           takes precedence over run_a_score and the stored scores.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        # Input resolution order: (1) run files, (2) explicit score dicts,
        # (3) scores stored on the evaluator. The original scores must always
        # be present. The final else binds only to the last if, so ERR_MSG is
        # printed (and None returned) when no complete input combination exists.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Use the replicability qrels evaluator when present (set by
                # RplEvaluator); otherwise score against the original qrels.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)
    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run file,
                           takes precedence over run_b_score and the stored scores.
        @param run_a_path: Path to another reproduced/replicated advanced run file,
                           takes precedence over run_a_score and the stored scores.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        # Same input-resolution cascade as er(): run files, then explicit
        # scores, then the evaluator's stored scores.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)
    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param rpd: Boolean indicating if the evaluated runs are reproduced.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Requires original baseline scores plus some baseline rep/rpl scores
        # (either stored or passed in); otherwise ERR_MSG is printed.
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            # Explicit scores take precedence over the evaluator's stored scores.
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.

    All reproduced runs are therefore scored against the ORIGINAL qrels
    (``self.rel_eval`` from the base class).
    """
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A reproduced run. If not specified, the original and reproduced runs of the the RpdEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        # Score the original runs first (base class), then the reproduced ones.
        super(RpdEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
        """
        # Input resolution cascade: (1) run file paths, (2) parsed runs passed
        # as arguments, (3) the reproduced runs stored on the evaluator. In
        # each tier the advanced run is optional; baseline-only results are
        # returned when it is missing.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                            'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
                        See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        # Same cascade as ktau_union(): paths, then provided runs, then the
        # evaluator's stored reproduced runs.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            print(ERR_MSG)
    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Root Mean Square Error (RMSE) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        # Cascade: run files (scored against the original qrels), then explicit
        # score dicts, then the evaluator's stored scores.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the normalized Root Mean Square Error (RMSE).
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with nRMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        # Mirrors rmse() with the normalized nRMSE measure; same input cascade.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    # NOTE(review): this feedback message omits "normalized",
                    # unlike the sibling branches; runtime string left as-is here.
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
        as in the original experiment.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Paths win over explicit scores; the actual test is delegated to the
        # base class _ttest() with its default rpd=True (paired test).
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.

    Replicated runs are scored against a second set of qrels (the replicability
    qrels), held in ``self.rel_eval_rpl``, while the original runs are still
    scored against the original qrels via the base class.
    """
    def __init__(self, **kwargs):
        """
        Accepts all keyword arguments of Evaluator plus ``qrel_rpl_path``,
        the path to the qrels of the replication test collection.
        """
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as qrel_file:
                # Build the evaluator for the replicability qrels over all
                # measures supported by pytrec_eval.
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(
                    pytrec_eval.parse_qrel(qrel_file), pytrec_eval.supported_measures)
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the the RplEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs with the base class first, then score each
        # replicated run (after deterministic tie-breaking) with the
        # replicability qrels.
        super(RplEvaluator, self).evaluate()
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            rep_run = getattr(self, run_attr)
            if rep_run:
                rep_run = break_ties(rep_run)
                setattr(self, run_attr, rep_run)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(rep_run))
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
                           if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
                           if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        def _score_file(path):
            # Parse a run file, order its topics deterministically, and score
            # it against the replicability qrels.
            with open(path, 'r') as run_file:
                parsed = pytrec_eval.parse_run(run_file)
            parsed = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval_rpl.evaluate(parsed)
        # Run-file paths take precedence over explicit score dicts; rpd=False
        # selects the unpaired variant of the test in the base class.
        if run_b_path:
            baseline_score = _score_file(run_b_path)
            advanced_score = _score_file(run_a_path) if run_a_path else None
            return self._ttest(rpd=False, run_b_score=baseline_score,
                               run_a_score=advanced_score, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
dri
|
Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The DeltaRI value is determined by the difference between the relative improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the DRI values for the specified run combination.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
"""
An abstract evaluator that holds the original baseline and advanced run as well as
the reproduced/replicated baseline and advanced run.
"""
def __init__(self, **kwargs):
self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
self.run_b_orig = None
self.run_a_orig = None
self.run_b_rep = None
self.run_a_rep = None
self.run_b_orig_score = None
self.run_a_orig_score = None
self.run_b_rep_score = None
self.run_a_rep_score = None
if self.qrel_orig_path:
with open(self.qrel_orig_path, 'r') as f_qrel:
qrel_orig = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
if self.run_b_orig_path:
with open(self.run_b_orig_path, 'r') as f_run:
self.run_b_orig = pytrec_eval.parse_run(f_run)
self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
if self.run_a_orig_path:
with open(self.run_a_orig_path, 'r') as f_run:
self.run_a_orig = pytrec_eval.parse_run(f_run)
self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
if self.run_b_rep_path:
with open(self.run_b_rep_path, 'r') as f_run:
self.run_b_rep = pytrec_eval.parse_run(f_run)
self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
if self.run_a_rep_path:
with open(self.run_a_rep_path, 'r') as f_run:
self.run_a_rep = pytrec_eval.parse_run(f_run)
self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
def trim(self, t=None, run=None):
"""
Trims all runs of the Evaluator to the length specified by the threshold value t.
@param t: Threshold parameter or number of top-k documents to be considered.
@param run: If run is not None, only the provided run will be trimmed.
"""
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
def evaluate(self, run=None):
"""
Evaluates the original baseline and advanced run if available.
@param run: Reproduced or replicated run that will be evaluated.
"""
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to a baseline run file; takes precedence over run_b_score.
        @param run_a_path: Path to an advanced run file; takes precedence over run_a_score.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        # Input precedence: run file paths > pre-computed score arguments > scores stored on self.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # RplEvaluator instances carry rel_eval_rpl (replicability qrels);
                # fall back to the original-qrels evaluator otherwise.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            # No usable score combination available; report and implicitly return None.
            print(ERR_MSG)
# MASKED: dri function (lines 153-192)
    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param rpd: Boolean indicating if the evaluated runs are reproduced.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Requires original baseline scores plus some reproduced baseline scores
        # (either stored on self or passed in).
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                # NOTE(review): self.run_a_orig_score may still be None here if only the
                # baseline was evaluated — confirm callers always evaluate the advanced
                # run before passing run_a_score.
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            # No explicit scores given: fall back to the scores stored on self.
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.
    """

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
        be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        # Score the originals first via the base class, then the reproduced runs.
        super(RpdEvaluator, self).evaluate()
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            reproduced = getattr(self, run_attr)
            if reproduced:
                reproduced = break_ties(reproduced)
                setattr(self, run_attr, reproduced)
                setattr(self, score_attr, self.rel_eval.evaluate(reproduced))
    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
        """
        # Input precedence: run file paths > parsed-run arguments > runs stored on self.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                            'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                # Only a baseline path was supplied.
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
        See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        # Input precedence: run file paths > parsed-run arguments > runs stored on self.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                # Only a baseline path was supplied.
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            print(ERR_MSG)
    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Root Mean Square Error (RMSE) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values that measure the closeness
        between the topics scores of the original and reproduced runs.
        """
        # Input precedence: run file paths > pre-computed score arguments > scores stored on self.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                # Only a baseline path was supplied.
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the normalized Root Mean Square Error (RMSE).
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with nRMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
as in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """

    def __init__(self, **kwargs):
        """
        In addition to the base-class inputs, loads the replicability qrels when
        'qrel_rpl_path' is given and exposes them as self.rel_eval_rpl.
        """
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                parsed_qrel = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(parsed_qrel, pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
        be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Originals are scored by the base class; replicated runs use the replicability qrels.
        super(RplEvaluator, self).evaluate()
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            replicated = getattr(self, run_attr)
            if replicated:
                replicated = break_ties(replicated)
                setattr(self, run_attr, replicated)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(replicated))

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
        if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
        if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        def _score_run(path):
            # Parse a run file, order it by topic id, and score it against the replicability qrels.
            with open(path, 'r') as f_run:
                parsed = pytrec_eval.parse_run(f_run)
            parsed = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval_rpl.evaluate(parsed)

        if run_b_path:
            baseline_score = _score_run(run_b_path)
            advanced_score = _score_run(run_a_path) if run_a_path else None
            return self._ttest(rpd=False, run_b_score=baseline_score, run_a_score=advanced_score,
                               print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score,
                           print_feedback=print_feedback)
|
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The DeltaRI value is determined by the difference between the relative improvements
    of the original and reproduced/replicated experiments.
    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to a baseline run file; takes precedence over run_b_score.
    @param run_a_path: Path to an advanced run file; takes precedence over run_a_score.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the DRI values for the specified run combination.
    """
    if print_feedback:
        print('Determining Delta Relative Improvement (DRI)')
    # Input precedence: run file paths > pre-computed score arguments > scores stored on self.
    if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
            # RplEvaluator instances carry rel_eval_rpl (replicability qrels);
            # fall back to the original-qrels evaluator otherwise.
            run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
            run_a_rep = pytrec_eval.parse_run(a_run)
            run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
            run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    else:
        # No usable score combination available; report and implicitly return None.
        print(ERR_MSG)
| 153
| 192
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
"""
An abstract evaluator that holds the original baseline and advanced run as well as
the reproduced/replicated baseline and advanced run.
"""
def __init__(self, **kwargs):
self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
self.run_b_orig = None
self.run_a_orig = None
self.run_b_rep = None
self.run_a_rep = None
self.run_b_orig_score = None
self.run_a_orig_score = None
self.run_b_rep_score = None
self.run_a_rep_score = None
if self.qrel_orig_path:
with open(self.qrel_orig_path, 'r') as f_qrel:
qrel_orig = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
if self.run_b_orig_path:
with open(self.run_b_orig_path, 'r') as f_run:
self.run_b_orig = pytrec_eval.parse_run(f_run)
self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
if self.run_a_orig_path:
with open(self.run_a_orig_path, 'r') as f_run:
self.run_a_orig = pytrec_eval.parse_run(f_run)
self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
if self.run_b_rep_path:
with open(self.run_b_rep_path, 'r') as f_run:
self.run_b_rep = pytrec_eval.parse_run(f_run)
self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
if self.run_a_rep_path:
with open(self.run_a_rep_path, 'r') as f_run:
self.run_a_rep = pytrec_eval.parse_run(f_run)
self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
def trim(self, t=None, run=None):
"""
Trims all runs of the Evaluator to the length specified by the threshold value t.
@param t: Threshold parameter or number of top-k documents to be considered.
@param run: If run is not None, only the provided run will be trimmed.
"""
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
def evaluate(self, run=None):
"""
Evaluates the original baseline and advanced run if available.
@param run: Reproduced or replicated run that will be evaluated.
"""
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the Effect Ratio (ER) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The ER value is determined by the ratio between the mean improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the ER values for the specified run combination.
"""
if print_feedback:
print('Determining Effect Ratio (ER)')
if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The DeltaRI value is determined by the difference between the relative improvements
    of the original and reproduced/replicated experiments.
    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced/replicated baseline run; when given (together with
                       run_a_path), the run is parsed and scored on the fly and takes precedence over
                       run_b_score and the stored scores.
    @param run_a_path: Path to another reproduced/replicated advanced run, see run_b_path.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the DRI values for the specified run combination,
             or None (with an error message printed) if no usable score combination is available.
    """
    if print_feedback:
        print('Determining Delta Relative Improvement (DRI)')
    # Tier 1: both run files provided - parse, sort by topic, and score them now.
    if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
            # Use the replicability qrels when this is an RplEvaluator, else the original qrels.
            run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
            run_a_rep = pytrec_eval.parse_run(a_run)
            run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
            run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
    # Tier 2: pre-computed scores passed in by the caller.
    if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    # Tier 3: fall back to the scores stored on the evaluator itself.
    if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    else:
        print(ERR_MSG)
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
    """
    Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    @param rpd: Boolean indicating if the evaluated runs are reproduced (paired test);
                False selects the unpaired variant used for replicated runs.
    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run,
             or None (with an error message printed) if no baseline scores are available.
    """
    # Baseline scores are mandatory; the replicated side may come either from the
    # arguments or from the scores stored on the evaluator.
    if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
        # Explicit scores for both runs were passed in.
        # NOTE(review): this branch reads self.run_a_orig_score without checking it is set -
        # assumes callers only pass run_a_score when an original advanced run was evaluated.
        if run_b_score and run_a_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
        # Explicit score for the baseline run only.
        if run_b_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
        # Fall back to the scores stored on the evaluator.
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
    else:
        print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.

    All reproduced runs are scored against the original qrels (self.rel_eval).
    The measure methods below share a common three-tier resolution order:
    run file paths (parsed and scored on the fly) take precedence over explicitly
    passed runs/scores, which take precedence over the runs/scores stored on the
    evaluator; if nothing usable is available, an error message is printed.
    """

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        # Score the original runs via the base class, then the reproduced runs here.
        super(RpdEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)

    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
        """
        # Tier 1: run file paths provided - parse and sort the runs by topic on the fly.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                            'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        # Tier 2: runs passed in directly by the caller.
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        # Tier 3: fall back to the runs stored on the evaluator.
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
        else:
            print(ERR_MSG)

    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
                          if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
                        See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        # Tier 1: run file paths provided - parse and sort the runs by topic on the fly.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        # Tier 2: runs passed in directly by the caller.
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        # Tier 3: fall back to the runs stored on the evaluator.
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            print(ERR_MSG)

    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Root Mean Square Error (RMSE) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        # Tier 1: run file paths provided - parse, sort, and score the runs on the fly.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        # Tier 2: pre-computed scores passed in by the caller.
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        # Tier 3: fall back to the scores stored on the evaluator.
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)

    def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the normalized Root Mean Square Error (RMSE).
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with nRMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        # Tier 1: run file paths provided - parse, sort, and score the runs on the fly.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        # Tier 2: pre-computed scores passed in by the caller.
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        # Tier 3: fall back to the scores stored on the evaluator.
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    # NOTE(review): this message omits "normalized" unlike its siblings -
                    # runtime string left unchanged by this documentation pass.
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
        as in the original experiment.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
                           if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
                           if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Score any provided run files first, then delegate to the shared _ttest helper
        # (rpd defaults to True there, i.e. the paired variant).
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """

    def __init__(self, **kwargs):
        super(RplEvaluator, self).__init__(**kwargs)
        # The replicated runs are judged against their own qrels, not the original ones.
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                parsed_qrels = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(parsed_qrels, pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs via the base class, then the replicated runs
        # against the replicability qrels.
        super(RplEvaluator, self).evaluate()
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            replicated = getattr(self, run_attr)
            if replicated:
                replicated = break_ties(replicated)
                setattr(self, run_attr, replicated)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(replicated))

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RplEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RplEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
                           if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
                           if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        def _score_from_file(handle):
            # Parse a run file, order its topics, and score it with the replicability qrels.
            parsed = pytrec_eval.parse_run(handle)
            ordered = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval_rpl.evaluate(ordered)

        if run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                baseline_score = _score_from_file(b_run)
                advanced_score = _score_from_file(a_run)
            return self._ttest(rpd=False, run_b_score=baseline_score, run_a_score=advanced_score,
                               print_feedback=print_feedback)
        if run_b_path:
            with open(run_b_path, 'r') as b_run:
                baseline_score = _score_from_file(b_run)
            return self._ttest(rpd=False, run_b_score=baseline_score, run_a_score=None,
                               print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score,
                           print_feedback=print_feedback)
|
evaluate
|
Evaluates the scores of the original and reproduced baseline and advanced runs.
If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
"""
An abstract evaluator that holds the original baseline and advanced run as well as
the reproduced/replicated baseline and advanced run.
"""
def __init__(self, **kwargs):
    """
    Initializes the evaluator from keyword arguments.
    @param qrel_orig_path: Path to the original qrels; when given, a RelevanceEvaluator
                           over all supported measures is created as self.rel_eval.
    @param run_b_orig_path: Path to the original baseline run.
    @param run_a_orig_path: Path to the original advanced run.
    @param run_b_rep_path: Path to the reproduced/replicated baseline run.
    @param run_a_rep_path: Path to the reproduced/replicated advanced run.
    Runs are parsed with pytrec_eval and stored with their topics in sorted order;
    any run whose path is not provided is left as None. Scores start as None and
    are filled in by evaluate().
    """
    def _load_run(path):
        # Parse a TREC run file and return it keyed by topic in sorted order,
        # or None when no path was supplied. (Replaces four copies of the
        # same load-parse-sort stanza.)
        if not path:
            return None
        with open(path, 'r') as f_run:
            run = pytrec_eval.parse_run(f_run)
        return {t: run[t] for t in sorted(run)}

    self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
    self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
    self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
    self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
    self.run_a_rep_path = kwargs.get('run_a_rep_path', None)

    # Scores are computed lazily by evaluate().
    self.run_b_orig_score = None
    self.run_a_orig_score = None
    self.run_b_rep_score = None
    self.run_a_rep_score = None

    if self.qrel_orig_path:
        with open(self.qrel_orig_path, 'r') as f_qrel:
            qrel_orig = pytrec_eval.parse_qrel(f_qrel)
        self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)

    self.run_b_orig = _load_run(self.run_b_orig_path)
    self.run_a_orig = _load_run(self.run_a_orig_path)
    self.run_b_rep = _load_run(self.run_b_rep_path)
    self.run_a_rep = _load_run(self.run_a_rep_path)
def trim(self, t=None, run=None):
    """
    Trims all runs of the Evaluator to the length specified by the threshold value t.
    @param t: Threshold parameter or number of top-k documents to be considered;
              when falsy, the module-level trim() default length is used.
    @param run: If run is not None, only the provided run will be trimmed (the
                evaluator's own runs are left untouched).
    """
    def _break_and_trim(candidate):
        # Break score ties deterministically, then trim in place; returns the
        # (possibly rebound) run so callers can store it back.
        candidate = break_ties(candidate)
        if t:
            trim(candidate, thresh=t)
        else:
            trim(candidate)
        return candidate

    if run:
        run = _break_and_trim(run)
        return
    # One loop replaces four identical stanzas over the evaluator's runs.
    for attr in ('run_b_orig', 'run_a_orig', 'run_b_rep', 'run_a_rep'):
        current = getattr(self, attr)
        if current:
            setattr(self, attr, _break_and_trim(current))
def evaluate(self, run=None):
    """
    Evaluates the original baseline and advanced run if available.
    @param run: Reproduced or replicated run that will be evaluated
                (handled by subclass overrides; unused here).
    """
    # Tie-break and score each original run that was loaded.
    for run_attr, score_attr in (('run_b_orig', 'run_b_orig_score'),
                                 ('run_a_orig', 'run_a_orig_score')):
        original = getattr(self, run_attr)
        if original:
            original = break_ties(original)
            setattr(self, run_attr, original)
            setattr(self, score_attr, self.rel_eval.evaluate(original))
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Effect Ratio (ER) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The ER value is determined by the ratio between the mean improvements
    of the original and reproduced/replicated experiments.
    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced/replicated baseline run; when given (together with
                       run_a_path), the run is parsed and scored on the fly and takes precedence over
                       run_b_score and the stored scores.
    @param run_a_path: Path to another reproduced/replicated advanced run, see run_b_path.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the ER values for the specified run combination,
             or None (with an error message printed) if no usable score combination is available.
    """
    if print_feedback:
        print('Determining Effect Ratio (ER)')
    # Tier 1: both run files provided - parse, sort by topic, and score them now.
    if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
            # Use the replicability qrels when this is an RplEvaluator, else the original qrels.
            run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
            run_a_rep = pytrec_eval.parse_run(a_run)
            run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
            run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
    # Tier 2: pre-computed scores passed in by the caller.
    if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
        return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                  rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    # Tier 3: fall back to the scores stored on the evaluator.
    if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
        return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                  rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    else:
        print(ERR_MSG)
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The DeltaRI value is determined by the difference between the relative improvements
    of the original and reproduced/replicated experiments.
    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced/replicated baseline run; takes precedence
                       (together with run_a_path) over run_b_score and the stored scores.
    @param run_a_path: Path to another reproduced/replicated advanced run, see run_b_path.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the DRI values for the specified run combination.
    """
    def _score_from_file(handle):
        # Parse a run file, order its topics, and score it - against the
        # replicability qrels when available, else the original qrels.
        parsed = pytrec_eval.parse_run(handle)
        ordered = {topic: parsed[topic] for topic in sorted(parsed)}
        evaluator = self.rel_eval_rpl if hasattr(self, 'rel_eval_rpl') else self.rel_eval
        return evaluator.evaluate(ordered)

    if print_feedback:
        print('Determining Delta Relative Improvement (DRI)')
    have_originals = self.run_b_orig_score and self.run_a_orig_score
    if have_originals and run_b_path and run_a_path:
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            rep_score_b = _score_from_file(b_run)
            rep_score_a = _score_from_file(a_run)
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=rep_score_b, rep_score_a=rep_score_a, pbar=print_feedback)
    if have_originals and run_b_score and run_a_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    if have_originals and self.run_b_rep_score and self.run_a_rep_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    print(ERR_MSG)
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
    """
    Conduct either a paired (reproducibility) or unpaired (replicability) two-sided t-test
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param rpd: True for reproduced runs (paired test); False for replicated runs (unpaired test).
    @param run_b_score: Scores of the baseline run;
                        if not provided the stored reproduced baseline scores are used instead.
    @param run_a_score: Scores of the advanced run;
                        if not provided the stored reproduced advanced scores are used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values comparing the original and reproduced/replicated score
             distributions per run; None (after printing ERR_MSG) when the original baseline
             scores or any reproduced baseline scores are missing.
    """
    # Require original baseline scores plus reproduced baseline scores from either source.
    if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
        if run_b_score and run_a_score:
            # Both runs' scores were handed in explicitly.
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
        if run_b_score:
            # Only baseline scores were handed in explicitly.
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
        # Fall back to the scores stored on the evaluator.
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
    else:
        print(ERR_MSG)
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
# MASKED: evaluate function (lines 238-256)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determine Kendall's tau Union (KTU) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: A reproduced baseline run (parsed, topic -> ranking);
                      if not provided the stored reproduced baseline run is used instead.
    @param run_a_rep: A reproduced advanced run (parsed, topic -> ranking);
                      if not provided the stored reproduced advanced run is used instead.
    @param run_b_path: Path to another reproduced baseline run file; takes precedence over run_b_rep.
    @param run_a_path: Path to another reproduced advanced run file; takes precedence over run_a_rep.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with KTU values that compare the document orderings of the original
             and reproduced runs; None (after printing ERR_MSG) when no runs are available.
    """
    # Highest precedence: runs read from the given file paths.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    # Next: runs handed in directly as parsed dictionaries.
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    # Finally: the reproduced runs stored on the evaluator itself.
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
    else:
        print(ERR_MSG)
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
    """
    Determine the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: A reproduced baseline run (parsed, topic -> ranking);
                      if not provided the stored reproduced baseline run is used instead.
    @param run_a_rep: A reproduced advanced run (parsed, topic -> ranking);
                      if not provided the stored reproduced advanced run is used instead.
    @param run_b_path: Path to another reproduced baseline run file; takes precedence over run_b_rep.
    @param run_a_path: Path to another reproduced advanced run file; takes precedence over run_a_rep.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
                    See also: https://github.com/claclark/Compatibility
    @return: Dictionary with RBO values that compare the document orderings of the original
             and reproduced runs; None (after printing ERR_MSG) when no runs are available.
    """
    # Highest precedence: runs read from the given file paths.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    # Next: runs handed in directly as parsed dictionaries.
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    # Finally: the reproduced runs stored on the evaluator itself.
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    else:
        print(ERR_MSG)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determine the Root Mean Square Error (RMSE) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_score: Scores of the baseline run;
                        if not provided the stored scores are used instead.
    @param run_a_score: Scores of the advanced run;
                        if not provided the stored scores are used instead.
    @param run_b_path: Path to another reproduced baseline run file; takes precedence over run_b_score.
    @param run_a_path: Path to another reproduced advanced run file; takes precedence over run_a_score.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with RMSE values that measure the closeness
             between the topic scores of the original and reproduced runs;
             None (after printing ERR_MSG) when no scores are available.
    """
    # Highest precedence: parse and evaluate the runs at the given file paths.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    # Next: scores handed in directly.
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    # Finally: the scores stored on the evaluator itself.
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determine the normalized Root Mean Square Error (nRMSE).

    @param run_b_score: Scores of the baseline run;
                        if not provided the stored scores are used instead.
    @param run_a_score: Scores of the advanced run;
                        if not provided the stored scores are used instead.
    @param run_b_path: Path to another reproduced baseline run file; takes precedence over run_b_score.
    @param run_a_path: Path to another reproduced advanced run file; takes precedence over run_a_score.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with nRMSE values that measure the closeness
             between the topic scores of the original and reproduced runs;
             None (after printing ERR_MSG) when no scores are available.
    """
    # Highest precedence: parse and evaluate the runs at the given file paths.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    # Next: scores handed in directly.
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    # Finally: the scores stored on the evaluator itself.
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                # Fixed: message previously omitted "normalized", inconsistent with
                # every other feedback message in this method.
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conduct a paired two-tailed t-test for reproduced runs that were derived from the
    same test collection as in the original experiment.

    @param run_b_score: Scores of the baseline run;
                        if not provided the stored scores are used instead.
    @param run_a_score: Scores of the advanced run;
                        if not provided the stored scores are used instead.
    @param run_b_path: Path to another reproduced baseline run file; takes precedence over run_b_score.
    @param run_a_path: Path to another reproduced advanced run file; takes precedence over run_a_score.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    def _scores_from_file(path):
        # Parse a TREC-style run file, normalize it to a topic-sorted dict,
        # and evaluate it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        parsed = {topic: parsed[topic] for topic in sorted(parsed)}
        return self.rel_eval.evaluate(parsed)

    if run_b_path:
        # File paths take precedence over directly supplied scores.
        baseline_scores = _scores_from_file(run_b_path)
        advanced_scores = _scores_from_file(run_a_path) if run_a_path else None
        return self._ttest(run_b_score=baseline_scores, run_a_score=advanced_scores, print_feedback=print_feedback)
    return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication
    for runs that were derived from a test collection NOT used in the original experiment.

    In addition to the base Evaluator state it holds a second relevance evaluator
    (``rel_eval_rpl``) built from the replicated collection's qrels.
    """
    def __init__(self, **kwargs):
        """
        @param kwargs: Accepts every keyword of Evaluator.__init__ plus
                       'qrel_rpl_path' - path to the qrels of the replicated collection.
        """
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            # Build a second relevance evaluator from the replicated collection's qrels.
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluate the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary
        with the corresponding scores is returned.

        @param run: A replicated run. If not specified, the original and replicated runs
                    of the RplEvaluator will be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs (against the original qrels) first.
        super(RplEvaluator, self).evaluate()
        # Replicated runs are scored against the replicated collection's qrels.
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conduct an un-paired two-tailed t-test for replicated runs that were derived from
        a test collection not used in the original experiment.

        @param run_b_score: Scores of the baseline run;
                            if not provided the stored scores are used instead.
        @param run_a_score: Scores of the advanced run;
                            if not provided the stored scores are used instead.
        @param run_b_path: Path to another replicated baseline run file; takes precedence over run_b_score.
        @param run_a_path: Path to another replicated advanced run file; takes precedence over run_a_score.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
                    # rpd=False selects the unpaired variant in Evaluator._ttest.
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
def evaluate(self, run=None):
    """
    Evaluate the scores of the original and reproduced baseline and advanced runs.
    If a (reproduced) run is provided, only that run is evaluated and a dictionary
    with its scores is returned.

    @param run: A reproduced run. If not specified, the original and reproduced runs
                stored on the RpdEvaluator are used instead.
    @return: If run is specified, a dictionary with the corresponding scores.
    """
    if run:
        # Ad-hoc mode: score only the supplied run against the original qrels.
        return self.rel_eval.evaluate(run)
    # Score the original baseline/advanced runs first.
    super(RpdEvaluator, self).evaluate()
    # Then break ties in each stored reproduced run and score it.
    for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                 ('run_a_rep', 'run_a_rep_score')):
        reproduced = getattr(self, run_attr)
        if reproduced:
            reproduced = break_ties(reproduced)
            setattr(self, run_attr, reproduced)
            setattr(self, score_attr, self.rel_eval.evaluate(reproduced))
| 238
| 256
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.

    Runs are parsed with pytrec_eval from TREC-style run files and kept as
    topic-sorted dictionaries; evaluation results are stored alongside them.
    """
    def __init__(self, **kwargs):
        """
        @param kwargs: Optional keywords: 'qrel_orig_path', 'run_b_orig_path',
                       'run_a_orig_path', 'run_b_rep_path', 'run_a_rep_path'.
        """
        # File-system paths of the original qrels and the four run files (all optional).
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs and their evaluation results; filled lazily below / by evaluate().
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            # Build the relevance evaluator from the original qrels, requesting
            # every measure pytrec_eval supports.
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        # Each provided run is parsed and normalized to a topic-sorted dict.
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}

    def trim(self, t=None, run=None):
        """
        Trim all runs of the Evaluator to the length specified by the threshold value t.

        @param t: Threshold parameter or number of top-k documents to be considered;
                  if None, repro_eval.util.trim's default is used.
        @param run: If run is not None, only the provided run will be trimmed (in place)
                    and the Evaluator's own runs are left untouched.
        """
        if run:
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        # Break ties and trim every run the evaluator holds.
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            if t:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            if t:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            if t:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            if t:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)

    def evaluate(self, run=None):
        """
        Evaluate the original baseline and advanced run if available.

        @param run: Present for interface compatibility with the subclasses.
                    NOTE(review): this base implementation ignores it - subclasses
                    override evaluate() to score a supplied reproduced/replicated run.
        """
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)

    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determine the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run;
                            if not provided the stored scores are used instead.
        @param run_a_score: Scores of the advanced run;
                            if not provided the stored scores are used instead.
        @param run_b_path: Path to another baseline run file; takes precedence over run_b_score.
        @param run_a_path: Path to another advanced run file; takes precedence over run_a_score.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination;
                 None (after printing ERR_MSG) when the required scores are missing.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Use the replicability evaluator when present (RplEvaluator subclass),
                # otherwise fall back to the original-collection evaluator.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determine the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run;
                            if not provided the stored scores are used instead.
        @param run_a_score: Scores of the advanced run;
                            if not provided the stored scores are used instead.
        @param run_b_path: Path to another baseline run file; takes precedence over run_b_score.
        @param run_a_path: Path to another advanced run file; takes precedence over run_a_score.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination;
                 None (after printing ERR_MSG) when the required scores are missing.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Use the replicability evaluator when present (RplEvaluator subclass).
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conduct either a paired (reproducibility) or unpaired (replicability) two-sided
        t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param rpd: True for reproduced runs (paired test); False for replicated runs (unpaired test).
        @param run_b_score: Scores of the baseline run;
                            if not provided the stored scores are used instead.
        @param run_a_score: Scores of the advanced run;
                            if not provided the stored scores are used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the
                 baseline and advanced run; None (after printing ERR_MSG) when the
                 required scores are missing.
        """
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                # Both runs' scores were handed in explicitly.
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                # Only baseline scores were handed in explicitly.
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            # Fall back to the scores stored on the evaluator.
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
def evaluate(self, run=None):
    """
    Evaluate the scores of the original and reproduced baseline and advanced runs.
    If a (reproduced) run is provided, only that run is evaluated and a dictionary
    with the corresponding scores is returned.

    @param run: A reproduced run. If not specified, the original and reproduced runs
                stored on the RpdEvaluator will be used instead.
    @return: If run is specified, a dictionary with the corresponding scores is returned.
    """
    if run:
        # Ad-hoc mode: score only the supplied run against the original qrels.
        return self.rel_eval.evaluate(run)
    # Score the original runs first (base class behavior).
    super(RpdEvaluator, self).evaluate()
    # Reproduced runs are scored against the same (original) qrels.
    if self.run_b_rep:
        self.run_b_rep = break_ties(self.run_b_rep)
        self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
    if self.run_a_rep:
        self.run_a_rep = break_ties(self.run_a_rep)
        self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: The reproduced baseline run (a topic-to-ranking mapping, not its scores);
                      if not provided, the reproduced baseline run of the RpdEvaluator object is used instead.
    @param run_a_rep: The reproduced advanced run (a topic-to-ranking mapping, not its scores);
                      if not provided, the reproduced advanced run of the RpdEvaluator object is used instead.
    @param run_b_path: Path to another reproduced baseline run;
                       if not provided, the reproduced baseline run of the RpdEvaluator object is used instead.
    @param run_a_path: Path to another reproduced advanced run;
                       if not provided, the reproduced advanced run of the RpdEvaluator object is used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
    """
    def _parse_sorted(file_handle):
        # Runs are re-keyed in sorted topic order so both rankings align topic by topic.
        parsed = pytrec_eval.parse_run(file_handle)
        return {topic: parsed[topic] for topic in sorted(parsed)}

    # Preference order: run files given by path > in-memory runs passed as arguments
    # > the reproduced runs already stored on this evaluator.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = _parse_sorted(b_run)
                run_a_rep = _parse_sorted(a_run)
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        with open(run_b_path, 'r') as b_run:
            run_b_rep = _parse_sorted(b_run)
        return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
    # No usable combination of original and reproduced runs was found.
    print(ERR_MSG)
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
    """
    Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: The reproduced baseline run (a topic-to-ranking mapping, not its scores);
                      if not provided, the reproduced baseline run of the RpdEvaluator object is used instead.
    @param run_a_rep: The reproduced advanced run (a topic-to-ranking mapping, not its scores);
                      if not provided, the reproduced advanced run of the RpdEvaluator object is used instead.
    @param run_b_path: Path to another reproduced baseline run;
                       if not provided, the reproduced baseline run of the RpdEvaluator object is used instead.
    @param run_a_path: Path to another reproduced advanced run;
                       if not provided, the reproduced advanced run of the RpdEvaluator object is used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
                    See also: https://github.com/claclark/Compatibility
    @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
    """
    def _parse_sorted(file_handle):
        # Runs are re-keyed in sorted topic order so both rankings align topic by topic.
        parsed = pytrec_eval.parse_run(file_handle)
        return {topic: parsed[topic] for topic in sorted(parsed)}

    # Preference order: run files given by path > in-memory runs passed as arguments
    # > the reproduced runs already stored on this evaluator.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = _parse_sorted(b_run)
                run_a_rep = _parse_sorted(a_run)
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        if print_feedback:
            print("Determining Rank-biased Overlap (RBO) for baseline run.")
        with open(run_b_path, 'r') as b_run:
            run_b_rep = _parse_sorted(b_run)
        return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        if print_feedback:
            print("Determining Rank-biased Overlap (RBO) for baseline run.")
        return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        if print_feedback:
            print("Determining Rank-biased Overlap (RBO) for baseline run.")
        return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    # No usable combination of original and reproduced runs was found.
    print(ERR_MSG)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Root Mean Square Error (RMSE) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with RMSE values that measure the closeness
             between the topics scores of the original and reproduced runs.
    """
    def _scores_from(path):
        # Parse a run file, sort it by topic id, and evaluate it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
            parsed = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval.evaluate(parsed)

    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, _scores_from(run_b_path), pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, _scores_from(run_a_path), pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, _scores_from(run_b_path), pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the normalized Root Mean Square Error (nRMSE).

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with nRMSE values that measure the closeness
             between the topics scores of the original and reproduced runs.
    """
    def _scores_from(path):
        # Parse a run file, sort it by topic id, and evaluate it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
            parsed = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval.evaluate(parsed)

    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, _scores_from(run_b_path), pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, _scores_from(run_a_path), pbar=print_feedback)}
        if print_feedback:
            print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': nRMSE(self.run_b_orig_score, _scores_from(run_b_path), pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                # Fixed: this message previously omitted "normalized", unlike its siblings.
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
    as in the original experiment.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    def _scores_from(path):
        # Parse a run file, sort it by topic id, and evaluate it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
            parsed = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval.evaluate(parsed)

    if run_b_path:
        baseline_scores = _scores_from(run_b_path)
        advanced_scores = _scores_from(run_a_path) if run_a_path else None
        return self._ttest(run_b_score=baseline_scores, run_a_score=advanced_scores,
                           print_feedback=print_feedback)
    return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """

    def __init__(self, **kwargs):
        super(RplEvaluator, self).__init__(**kwargs)
        # Replicated runs must be judged with the qrels of the *target* collection.
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                target_qrels = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(target_qrels,
                                                                   pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.

        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            # One-off evaluation of the given run against the target-collection qrels.
            return self.rel_eval_rpl.evaluate(run)

        # Score the original runs first, then the replicated ones.
        super(RplEvaluator, self).evaluate()

        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            rep_run = getattr(self, run_attr)
            if rep_run:
                rep_run = break_ties(rep_run)
                setattr(self, run_attr, rep_run)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(rep_run))
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
    not used in the original experiment.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RplEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RplEvaluator object will be used instead.
    @param run_b_path: Path to another replicated baseline run,
                       if not provided the replicated baseline run of the RplEvaluator object will be used instead.
    @param run_a_path: Path to another replicated advanced run,
                       if not provided the replicated advanced run of the RplEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    def _scores_from(path):
        # Parse a run file, sort it by topic id, and evaluate it against the target-collection qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
            parsed = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval_rpl.evaluate(parsed)

    if run_b_path:
        baseline_scores = _scores_from(run_b_path)
        advanced_scores = _scores_from(run_a_path) if run_a_path else None
        # rpd=False selects the unpaired variant of the test.
        return self._ttest(rpd=False, run_b_score=baseline_scores, run_a_score=advanced_scores,
                           print_feedback=print_feedback)
    return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score,
                       print_feedback=print_feedback)
|
rbo
|
Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_rep: The reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_rep: The reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
See also: https://github.com/claclark/Compatibility
@return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.
    """

    def __init__(self, **kwargs):
        # File-system locations of the qrels and the four runs (all optional).
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)

        # Parsed runs and their per-topic scores; populated lazily below / by evaluate().
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None

        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)

        # Parse every provided run file and re-key it in sorted topic order.
        for path, attr in ((self.run_b_orig_path, 'run_b_orig'),
                           (self.run_a_orig_path, 'run_a_orig'),
                           (self.run_b_rep_path, 'run_b_rep'),
                           (self.run_a_rep_path, 'run_a_rep')):
            if path:
                with open(path, 'r') as f_run:
                    parsed = pytrec_eval.parse_run(f_run)
                setattr(self, attr, {topic: parsed[topic] for topic in sorted(parsed)})
def trim(self, t=None, run=None):
    """
    Trims all runs of the Evaluator to the length specified by the threshold value t.

    @param t: Threshold parameter or number of top-k documents to be considered.
    @param run: If run is not None, only the provided run will be trimmed.
    """
    def _cut(target):
        # trim() falls back to its own default depth when no threshold is given.
        if t:
            trim(target, thresh=t)
        else:
            trim(target)

    if run:
        _cut(break_ties(run))
        return

    for attr in ('run_b_orig', 'run_a_orig', 'run_b_rep', 'run_a_rep'):
        current = getattr(self, attr)
        if current:
            current = break_ties(current)
            setattr(self, attr, current)
            _cut(current)
def evaluate(self, run=None):
    """
    Evaluates the original baseline and advanced run if available.

    @param run: Reproduced or replicated run that will be evaluated. Unused here;
                subclasses override this method and make use of it.
    """
    for run_attr, score_attr in (('run_b_orig', 'run_b_orig_score'),
                                 ('run_a_orig', 'run_a_orig_score')):
        original = getattr(self, run_attr)
        if original:
            # Ties are broken deterministically before scoring.
            original = break_ties(original)
            setattr(self, run_attr, original)
            setattr(self, score_attr, self.rel_eval.evaluate(original))
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Effect Ratio (ER) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The ER value is determined by the ratio between the mean improvements
    of the original and reproduced/replicated experiments.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the evaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the evaluator object will be used instead.
    @param run_b_path: Path to another reproduced/replicated baseline run,
                       if not provided the run of the evaluator object will be used instead.
    @param run_a_path: Path to another reproduced/replicated advanced run,
                       if not provided the run of the evaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the ER values for the specified run combination.
    """
    if print_feedback:
        print('Determining Effect Ratio (ER)')
    if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
        # RplEvaluator carries rel_eval_rpl (target-collection qrels); RpdEvaluator does not.
        evaluator = self.rel_eval_rpl if hasattr(self, 'rel_eval_rpl') else self.rel_eval
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
            run_b_rep_score = evaluator.evaluate(run_b_rep)
            run_a_rep = pytrec_eval.parse_run(a_run)
            run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
            run_a_rep_score = evaluator.evaluate(run_a_rep)
        return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                  rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
        return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                  rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
        return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                  rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    print(ERR_MSG)
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.
    The DeltaRI value is determined by the difference between the relative improvements
    of the original and reproduced/replicated experiments.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the evaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the evaluator object will be used instead.
    @param run_b_path: Path to another reproduced/replicated baseline run,
                       if not provided the run of the evaluator object will be used instead.
    @param run_a_path: Path to another reproduced/replicated advanced run,
                       if not provided the run of the evaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary containing the DRI values for the specified run combination.
    """
    if print_feedback:
        print('Determining Delta Relative Improvement (DRI)')
    if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
        # RplEvaluator carries rel_eval_rpl (target-collection qrels); RpdEvaluator does not.
        evaluator = self.rel_eval_rpl if hasattr(self, 'rel_eval_rpl') else self.rel_eval
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
            run_b_rep_score = evaluator.evaluate(run_b_rep)
            run_a_rep = pytrec_eval.parse_run(a_run)
            run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
            run_a_rep_score = evaluator.evaluate(run_a_rep)
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
    if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
        return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                       rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
    print(ERR_MSG)
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
    """
    Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to
    the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param rpd: Boolean indicating if the evaluated runs are reproduced (paired test);
                otherwise the unpaired variant is used.
    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the evaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the evaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
        # Guard on run_a_orig_score, mirroring rmse()/nrmse(): previously ttest() could be
        # handed None as the original advanced scores when only the baseline was loaded.
        if run_b_score and run_a_score and self.run_a_orig_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
        if run_b_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print('Determining p-values of t-test for baseline and advanced run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                    'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
        # Baseline-only fallback: the outer condition guarantees run_b_rep_score is set here.
        if print_feedback:
            print('Determining p-values of t-test for baseline run.')
        return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
    print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.
    """

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.

        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)

        # Score the original runs via the base class, then the reproduced ones.
        super(RpdEvaluator, self).evaluate()

        if self.run_b_rep:
            baseline_rep = break_ties(self.run_b_rep)
            self.run_b_rep = baseline_rep
            self.run_b_rep_score = self.rel_eval.evaluate(baseline_rep)
        if self.run_a_rep:
            advanced_rep = break_ties(self.run_a_rep)
            self.run_a_rep = advanced_rep
            self.run_a_rep_score = self.rel_eval.evaluate(advanced_rep)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: The reproduced baseline run (a topic-to-ranking mapping, not its scores);
                      if not provided, the reproduced baseline run of the RpdEvaluator object is used instead.
    @param run_a_rep: The reproduced advanced run (a topic-to-ranking mapping, not its scores);
                      if not provided, the reproduced advanced run of the RpdEvaluator object is used instead.
    @param run_b_path: Path to another reproduced baseline run;
                       if not provided, the reproduced baseline run of the RpdEvaluator object is used instead.
    @param run_a_path: Path to another reproduced advanced run;
                       if not provided, the reproduced advanced run of the RpdEvaluator object is used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
    """
    def _parse_sorted(file_handle):
        # Runs are re-keyed in sorted topic order so both rankings align topic by topic.
        parsed = pytrec_eval.parse_run(file_handle)
        return {topic: parsed[topic] for topic in sorted(parsed)}

    # Preference order: run files given by path > in-memory runs passed as arguments
    # > the reproduced runs already stored on this evaluator.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = _parse_sorted(b_run)
                run_a_rep = _parse_sorted(a_run)
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        with open(run_b_path, 'r') as b_run:
            run_b_rep = _parse_sorted(b_run)
        return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
    # No usable combination of original and reproduced runs was found.
    print(ERR_MSG)
# MASKED: rbo function (lines 320-381)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Root Mean Square Error (RMSE) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with RMSE values that measure the closeness
             between the topic scores of the original and reproduced runs.
    """
    # Input precedence: (1) run file paths (parsed and scored here),
    # (2) scores passed as arguments, (3) scores stored on this evaluator.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        # No usable combination of original and reproduced scores was available.
        print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the normalized Root Mean Square Error (nRMSE).

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with nRMSE values that measure the closeness
             between the topic scores of the original and reproduced runs.
    """
    # Input precedence: (1) run file paths (parsed and scored here),
    # (2) scores passed as arguments, (3) scores stored on this evaluator.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                # Fixed: this message previously omitted "normalized", inconsistent
                # with every other feedback message of this method.
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        # No usable combination of original and reproduced scores was available.
        print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
    as in the original experiment.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    # If run file paths are given, parse and score them first; otherwise hand the
    # provided (or stored) scores straight to the shared _ttest helper.
    if run_b_path:
        if run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
        else:
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
    return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """

    def __init__(self, **kwargs):
        """
        Initializes the RplEvaluator.

        In addition to the base Evaluator arguments, accepts 'qrel_rpl_path': the path
        to the qrels of the (different) test collection the replicated runs were derived from.
        """
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            # Replicated runs are scored against their own qrels, so a second
            # relevance evaluator is required besides the original one.
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided, only this one will be evaluated and a dictionary
        with the corresponding scores is returned.

        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator
                    will be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs (against the original qrels, via the base class),
        # then score the replicated runs against the replication qrels.
        super(RplEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.

        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RplEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RplEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
                           if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
                           if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # rpd=False selects the un-paired variant in the shared _ttest helper
        # (different test collection means topic scores cannot be paired).
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
    """
    Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: The reproduced baseline run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_a_rep: The reproduced advanced run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
                    See also: https://github.com/claclark/Compatibility
    @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
    """
    # Input precedence: (1) run file paths, (2) runs passed as arguments,
    # (3) the reproduced runs stored on this evaluator.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    else:
        # No usable combination of original and reproduced runs was available.
        print(ERR_MSG)
| 320
| 381
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.
    """

    def __init__(self, **kwargs):
        """
        Initializes the Evaluator.

        Accepted keyword arguments (all optional):
        'qrel_orig_path'  - path to the original qrels file,
        'run_b_orig_path' - path to the original baseline run,
        'run_a_orig_path' - path to the original advanced run,
        'run_b_rep_path'  - path to the reproduced/replicated baseline run,
        'run_a_rep_path'  - path to the reproduced/replicated advanced run.
        """
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs and their per-topic scores; populated lazily below / by evaluate().
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                # Re-insert topics in sorted order for deterministic iteration.
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}

    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.

        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            # NOTE(review): `run` is rebound locally here; the caller only observes
            # the trimming if break_ties returns the same mapping object - confirm
            # break_ties semantics.
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            if t:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            if t:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            if t:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            if t:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)

    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.

        @param run: Reproduced or replicated run that will be evaluated.
                    (Unused here; subclasses override this method and handle it.)
        """
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)

    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the Evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the Evaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run,
                           if not provided the run of the Evaluator object will be used instead.
        @param run_a_path: Path to another reproduced/replicated advanced run,
                           if not provided the run of the Evaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Replicated runs (RplEvaluator) are scored with their own qrels if present.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the Evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the Evaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run,
                           if not provided the run of the Evaluator object will be used instead.
        @param run_a_path: Path to another reproduced/replicated advanced run,
                           if not provided the run of the Evaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param rpd: Boolean indicating if the evaluated runs are reproduced (paired test).
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the Evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the Evaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Requires original baseline scores plus either stored or provided reproduced scores.
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
def evaluate(self, run=None):
    """
    Evaluates the scores of the original and reproduced baseline and advanced runs.
    If a (reproduced) run is provided, only this one will be evaluated and a dictionary
    with the corresponding scores is returned.

    @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator
                will be used instead.
    @return: If run is specified, a dictionary with the corresponding scores is returned.
    """
    if run:
        # Single-run mode: score the given run against the original qrels and return.
        return self.rel_eval.evaluate(run)
    # Score the original baseline/advanced runs first (delegated to the base Evaluator),
    # then score the reproduced runs against the same (original) qrels.
    super(RpdEvaluator, self).evaluate()
    if self.run_b_rep:
        self.run_b_rep = break_ties(self.run_b_rep)
        self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
    if self.run_a_rep:
        self.run_a_rep = break_ties(self.run_a_rep)
        self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: The reproduced baseline run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_a_rep: The reproduced advanced run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
    """
    # Input precedence: (1) run file paths, (2) runs passed as arguments,
    # (3) the reproduced runs stored on this evaluator.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}  # deterministic topic order
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
    else:
        # No usable combination of original and reproduced runs was available.
        print(ERR_MSG)
    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Parsed (reproduced) baseline run,
        if not provided the run of the RpdEvaluator object will be used instead.
        @param run_a_rep: Parsed (reproduced) advanced run,
        if not provided the run of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
        See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        # Input precedence: run files given via *_path are parsed and used first,
        # then pre-parsed run dicts (run_b_rep/run_a_rep), then the runs stored on self.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    # Re-key in sorted topic order for deterministic pairing with the original run.
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        # None of the input combinations is available: print the error message instead of raising.
        else:
            print(ERR_MSG)
    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Root Mean Square Error (RMSE) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values that measure the closeness
        between the topics scores of the original and reproduced runs.
        """
        # Input precedence: run files given via *_path are parsed and scored first,
        # then pre-computed score dicts (run_b_score/run_a_score), then scores stored on self.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    # Re-key in sorted topic order, then score against the original qrels.
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        # None of the input combinations is available: print the error message instead of raising.
        else:
            print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the normalized Root Mean Square Error (RMSE).
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with nRMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
as in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment. Replicated runs are therefore scored
    against their own set of relevance judgments (qrel_rpl_path).
    """
    def __init__(self, **kwargs):
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            # Build a dedicated relevance evaluator from the replication qrels.
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
        be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        super(RplEvaluator, self).evaluate()
        # Tie-break and score both stored replicated runs against the replication qrels.
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'), ('run_a_rep', 'run_a_rep_score')):
            rep_run = getattr(self, run_attr)
            if rep_run:
                rep_run = break_ties(rep_run)
                setattr(self, run_attr, rep_run)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(rep_run))
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
        if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
        if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        def _load_and_score(path):
            # Parse a run file, re-key it in sorted topic order, and score it
            # against the replication relevance judgments.
            with open(path, 'r') as f_run:
                parsed = pytrec_eval.parse_run(f_run)
                parsed = {topic: parsed[topic] for topic in sorted(parsed)}
                return self.rel_eval_rpl.evaluate(parsed)

        # Run files given via *_path take precedence over pre-computed score dicts.
        if run_b_path:
            baseline_score = _load_and_score(run_b_path)
            advanced_score = _load_and_score(run_a_path) if run_a_path else None
            return self._ttest(rpd=False, run_b_score=baseline_score, run_a_score=advanced_score, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
rmse
|
Determines the Root Mean Square Error (RMSE) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with RMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.
    """
    def __init__(self, **kwargs):
        # Optional file-system paths to the original qrels and the four runs.
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs: 'b' = baseline, 'a' = advanced; 'orig' = original, 'rep' = reproduced/replicated.
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        # Per-run score dictionaries, populated by evaluate().
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        # Each run is re-keyed in sorted topic order for deterministic iteration.
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.
        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            # Trim only the run passed in; the runs stored on self stay untouched.
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        # Otherwise tie-break and trim every stored run that is present.
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            if t:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            if t:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            if t:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            if t:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)
    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.
        @param run: Reproduced or replicated run that will be evaluated.
        """
        # NOTE(review): the run parameter is unused in this base implementation;
        # the subclasses in this file override evaluate() and handle it there.
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run,
        if not provided the run stored on the Evaluator object will be used instead.
        @param run_a_path: Path to another reproduced/replicated advanced run,
        if not provided the run stored on the Evaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        # Input precedence: run files via *_path, then score dicts, then scores stored on self.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Duck-check: a replicability evaluator carries rel_eval_rpl and scores
                # replicated runs against its own qrels.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        # None of the input combinations is available: print the error message instead of raising.
        else:
            print(ERR_MSG)
    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run,
        if not provided the run stored on the Evaluator object will be used instead.
        @param run_a_path: Path to another reproduced/replicated advanced run,
        if not provided the run stored on the Evaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        # Input precedence mirrors er(): run files via *_path, then score dicts, then self.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        # None of the input combinations is available: print the error message instead of raising.
        else:
            print(ERR_MSG)
    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param rpd: Boolean indicating if the evaluated runs are reproduced.
        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Explicitly passed scores take precedence over the scores stored on self.
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
def evaluate(self, run=None):
"""
Evaluates the scores of the original and reproduced baseline and advanced runs.
If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A reproduced run. If not specified, the original and reproduced runs of the the RpdEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
"""
if run:
return self.rel_eval.evaluate(run)
super(RpdEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Parsed (reproduced) baseline run,
        if not provided the run of the RpdEvaluator object will be used instead.
        @param run_a_rep: Parsed (reproduced) advanced run,
        if not provided the run of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
        """
        # Input precedence: run files given via *_path are parsed and used first,
        # then pre-parsed run dicts (run_b_rep/run_a_rep), then the runs stored on self.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    # Re-key in sorted topic order for deterministic pairing with the original run.
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                            'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
        # None of the input combinations is available: print the error message instead of raising.
        else:
            print(ERR_MSG)
    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Parsed (reproduced) baseline run,
        if not provided the run of the RpdEvaluator object will be used instead.
        @param run_a_rep: Parsed (reproduced) advanced run,
        if not provided the run of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
        if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
        if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
        See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        # Input precedence: run files given via *_path are parsed and used first,
        # then pre-parsed run dicts (run_b_rep/run_a_rep), then the runs stored on self.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    # Re-key in sorted topic order for deterministic pairing with the original run.
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        # None of the input combinations is available: print the error message instead of raising.
        else:
            print(ERR_MSG)
# MASKED: rmse function (lines 383-445)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the normalized Root Mean Square Error (nRMSE).

    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with nRMSE values that measure the closeness
    between the topics scores of the original and reproduced runs.
    """
    # Case 1: reproduced run(s) are given as file paths -> parse, sort by topic, evaluate.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    # Case 2: score dictionaries were passed in directly.
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    # Case 3: fall back to the scores already stored on the evaluator object.
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                # BUGFIX: this branch previously printed the non-normalized RMSE message,
                # inconsistent with every other nrmse feedback line.
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conducts a paired two-tailed t-test for reproduced runs that were derived from the same
    test collection as in the original experiment.

    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    def _scores_from_path(path):
        # Parse a run file, order it by topic id, and score it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        ordered = {topic: parsed[topic] for topic in sorted(parsed)}
        return self.rel_eval.evaluate(ordered)

    if run_b_path:
        baseline_scores = _scores_from_path(run_b_path)
        advanced_scores = _scores_from_path(run_a_path) if run_a_path else None
        return self._ttest(run_b_score=baseline_scores, run_a_score=advanced_scores,
                           print_feedback=print_feedback)
    return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score,
                       print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication
    for runs that were derived from a test collection not used in the original experiment.
    """

    def __init__(self, **kwargs):
        super(RplEvaluator, self).__init__(**kwargs)
        # Replication is measured against a second test collection, so an additional
        # qrels file (and its own relevance evaluator) is required besides the original one.
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided, only that one will be evaluated and a dictionary
        with the corresponding scores is returned.

        @param run: A replicated run. If not specified, the original and replicated runs of
        the RplEvaluator will be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        super(RplEvaluator, self).evaluate()
        # Replicated runs are scored with the qrels of the *new* test collection.
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            replicated = getattr(self, run_attr)
            if replicated:
                replicated = break_ties(replicated)
                setattr(self, run_attr, replicated)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(replicated))

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from
        a test collection not used in the original experiment.

        @param run_b_score: Scores of the baseline run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the scores of the RplEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
        if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
        if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        def _scores_from_path(path):
            # Parse a run file, order it by topic id, and score it with the replicability qrels.
            with open(path, 'r') as f_run:
                parsed = pytrec_eval.parse_run(f_run)
            ordered = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval_rpl.evaluate(ordered)

        if run_b_path:
            baseline_scores = _scores_from_path(run_b_path)
            advanced_scores = _scores_from_path(run_a_path) if run_a_path else None
            return self._ttest(rpd=False, run_b_score=baseline_scores,
                               run_a_score=advanced_scores, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score,
                           run_a_score=run_a_score, print_feedback=print_feedback)
|
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Root Mean Square Error (RMSE) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with RMSE values that measure the closeness
    between the topics scores of the original and reproduced runs.
    """
    def _scores_from_path(path):
        # Parse a run file, order it by topic id, and score it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        return self.rel_eval.evaluate({topic: parsed[topic] for topic in sorted(parsed)})

    # Preference order: runs given by path > score dicts given as arguments > stored scores.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            rep_score_b = _scores_from_path(run_b_path)
            rep_score_a = _scores_from_path(run_a_path)
            return {'baseline': RMSE(self.run_b_orig_score, rep_score_b, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, rep_score_a, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, _scores_from_path(run_b_path), pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    print(ERR_MSG)
| 383
| 445
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as the
    reproduced/replicated baseline and advanced run, together with their evaluation scores.
    """

    def __init__(self, **kwargs):
        """
        Reads the qrels and (up to four) run files whose paths are given via keyword
        arguments: qrel_orig_path, run_b_orig_path, run_a_orig_path, run_b_rep_path,
        run_a_rep_path.
        """
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs and their per-topic score dictionaries; filled below and by evaluate().
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        if self.run_b_orig_path:
            self.run_b_orig = self._read_run(self.run_b_orig_path)
        if self.run_a_orig_path:
            self.run_a_orig = self._read_run(self.run_a_orig_path)
        if self.run_b_rep_path:
            self.run_b_rep = self._read_run(self.run_b_rep_path)
        if self.run_a_rep_path:
            self.run_a_rep = self._read_run(self.run_a_rep_path)

    @staticmethod
    def _read_run(path):
        """Parses a TREC run file and returns it as a dictionary ordered by topic identifier."""
        with open(path, 'r') as f_run:
            run = pytrec_eval.parse_run(f_run)
        return {topic: run[topic] for topic in sorted(run)}

    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.

        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            # NOTE(review): break_ties rebinds the local name; the caller's dict only ends up
            # trimmed if break_ties returns/mutates the same object — TODO confirm in util.
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        for attr in ('run_b_orig', 'run_a_orig', 'run_b_rep', 'run_a_rep'):
            candidate = getattr(self, attr)
            if candidate:
                candidate = break_ties(candidate)
                setattr(self, attr, candidate)
                if t:
                    trim(candidate, thresh=t)
                else:
                    trim(candidate)

    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.

        @param run: Reproduced or replicated run; not used here, subclasses evaluate it.
        """
        for run_attr, score_attr in (('run_b_orig', 'run_b_orig_score'),
                                     ('run_a_orig', 'run_a_orig_score')):
            original = getattr(self, run_attr)
            if original:
                original = break_ties(original)
                setattr(self, run_attr, original)
                setattr(self, score_attr, self.rel_eval.evaluate(original))

    def _rep_scores_from_paths(self, run_b_path, run_a_path):
        """
        Parses and evaluates a pair of run files given by path.
        Uses the replicability evaluator (rel_eval_rpl) when the subclass provides one,
        otherwise the relevance evaluator of the original test collection.
        """
        evaluator = self.rel_eval_rpl if hasattr(self, 'rel_eval_rpl') else self.rel_eval
        with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
            run_b_rep = pytrec_eval.parse_run(b_run)
            rep_score_b = evaluator.evaluate({t: run_b_rep[t] for t in sorted(run_b_rep)})
            run_a_rep = pytrec_eval.parse_run(a_run)
            rep_score_a = evaluator.evaluate({t: run_a_rep[t] for t in sorted(run_a_rep)})
        return rep_score_b, rep_score_a

    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
        if not provided the stored scores of the evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the stored scores of the evaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run.
        @param run_a_path: Path to another reproduced/replicated advanced run.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        have_orig = self.run_b_orig_score and self.run_a_orig_score
        if have_orig and run_b_path and run_a_path:
            rep_score_b, rep_score_a = self._rep_scores_from_paths(run_b_path, run_a_path)
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=rep_score_b, rep_score_a=rep_score_a, pbar=print_feedback)
        if have_orig and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if have_orig and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        print(ERR_MSG)

    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
        if not provided the stored scores of the evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the stored scores of the evaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run.
        @param run_a_path: Path to another reproduced/replicated advanced run.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        have_orig = self.run_b_orig_score and self.run_a_orig_score
        if have_orig and run_b_path and run_a_path:
            rep_score_b, rep_score_a = self._rep_scores_from_paths(run_b_path, run_a_path)
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=rep_score_b, rep_score_a=rep_score_a, pbar=print_feedback)
        if have_orig and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if have_orig and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        print(ERR_MSG)

    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or un-paired (replicability) two-sided t-test
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param rpd: Boolean indicating if the evaluated runs are reproduced.
        @param run_b_score: Scores of the baseline run,
        if not provided the stored scores of the evaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
        if not provided the stored scores of the evaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        def _pvals(orig_score, rep_score):
            return ttest(orig_score, rep_score, rpd=rpd, pbar=print_feedback)

        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': _pvals(self.run_b_orig_score, run_b_score),
                        'advanced': _pvals(self.run_a_orig_score, run_a_score)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': _pvals(self.run_b_orig_score, run_b_score)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': _pvals(self.run_b_orig_score, self.run_b_rep_score),
                        'advanced': _pvals(self.run_a_orig_score, self.run_a_rep_score)}
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': _pvals(self.run_b_orig_score, self.run_b_rep_score)}
        print(ERR_MSG)
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
def evaluate(self, run=None):
    """
    Evaluates the scores of the original and reproduced baseline and advanced runs.
    If a (reproduced) run is provided, only that one will be evaluated and a dictionary
    with the corresponding scores is returned.

    @param run: A reproduced run. If not specified, the original and reproduced runs of
    the RpdEvaluator will be used instead.
    @return: If run is specified, a dictionary with the corresponding scores is returned.
    """
    if run:
        return self.rel_eval.evaluate(run)
    super(RpdEvaluator, self).evaluate()
    # Reproduced runs are scored with the same qrels as the original experiment.
    for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                 ('run_a_rep', 'run_a_rep_score')):
        reproduced = getattr(self, run_attr)
        if reproduced:
            reproduced = break_ties(reproduced)
            setattr(self, run_attr, reproduced)
            setattr(self, score_attr, self.rel_eval.evaluate(reproduced))
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: Reproduced baseline run,
    if not provided the reproduced run of the RpdEvaluator object will be used instead.
    @param run_a_rep: Reproduced advanced run,
    if not provided the reproduced run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
    """
    def _read(path):
        # Parse a run file and order it by topic identifier.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        return {topic: parsed[topic] for topic in sorted(parsed)}

    # Preference order: runs given by path > runs given as arguments > stored runs.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            rep_b = _read(run_b_path)
            rep_a = _read(run_a_path)
            return {'baseline': ktu(self.run_b_orig, rep_b, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, rep_a, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        return {'baseline': ktu(self.run_b_orig, _read(run_b_path), pbar=print_feedback)}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
        if print_feedback:
            print("Determining Kendall's tau Union (KTU) for baseline run.")
        return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
    print(ERR_MSG)
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
    """
    Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_rep: Reproduced baseline run,
    if not provided the reproduced run of the RpdEvaluator object will be used instead.
    @param run_a_rep: Reproduced advanced run,
    if not provided the reproduced run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
    See also: https://github.com/claclark/Compatibility
    @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
    """
    def _read(path):
        # Parse a run file and order it by topic identifier.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        return {topic: parsed[topic] for topic in sorted(parsed)}

    def _rbo(orig, rep):
        return RBO(orig, rep, pbar=print_feedback, misinfo=misinfo)

    # Preference order: runs given by path > runs given as arguments > stored runs.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            rep_b = _read(run_b_path)
            rep_a = _read(run_a_path)
            return {'baseline': _rbo(self.run_b_orig, rep_b),
                    'advanced': _rbo(self.run_a_orig, rep_a)}
        if print_feedback:
            print("Determining Rank-biased Overlap (RBO) for baseline run.")
        return {'baseline': _rbo(self.run_b_orig, _read(run_b_path))}
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': _rbo(self.run_b_orig, run_b_rep),
                    'advanced': _rbo(self.run_a_orig, run_a_rep)}
        if print_feedback:
            print("Determining Rank-biased Overlap (RBO) for baseline run.")
        return {'baseline': _rbo(self.run_b_orig, run_b_rep)}
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': _rbo(self.run_b_orig, self.run_b_rep),
                    'advanced': _rbo(self.run_a_orig, self.run_a_rep)}
        if print_feedback:
            print("Determining Rank-biased Overlap (RBO) for baseline run.")
        return {'baseline': _rbo(self.run_b_orig, self.run_b_rep)}
    print(ERR_MSG)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Root Mean Square Error (RMSE) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param run_b_score: Scores of the baseline run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
    if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
    if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
    if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with RMSE values that measure the closeness
    between the topics scores of the original and reproduced runs.
    """
    def _scores_from_path(path):
        # Parse a run file, order it by topic id, and score it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        return self.rel_eval.evaluate({topic: parsed[topic] for topic in sorted(parsed)})

    # Preference order: runs given by path > score dicts given as arguments > stored scores.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            rep_score_b = _scores_from_path(run_b_path)
            rep_score_a = _scores_from_path(run_a_path)
            return {'baseline': RMSE(self.run_b_orig_score, rep_score_b, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, rep_score_a, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, _scores_from_path(run_b_path), pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        if print_feedback:
            print("Determining Root Mean Square Error (RMSE) for baseline run.")
        return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the normalized Root Mean Square Error (nRMSE).

    Input precedence: run files given via ``run_b_path``/``run_a_path`` are parsed and
    scored on the fly; otherwise the pre-computed ``run_b_score``/``run_a_score``
    dictionaries are used; otherwise the scores already stored on the evaluator are compared.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with nRMSE values that measure the closeness
             between the topics scores of the original and reproduced runs.
    """
    # Tier 1: score freshly supplied run files against the original qrels.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    # Tier 2: caller passed pre-computed score dictionaries.
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    # Tier 3: fall back to the scores stored on the evaluator itself.
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                # BUG FIX: this branch previously printed the plain "Root Mean Square Error"
                # message, unlike every other branch of this normalized-RMSE method.
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
    as in the original experiment.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    def _score_run_file(path):
        # Parse a TREC run file, order its topics, and score it against the original qrels.
        with open(path, 'r') as f_run:
            parsed = pytrec_eval.parse_run(f_run)
        ordered = {topic: parsed[topic] for topic in sorted(parsed)}
        return self.rel_eval.evaluate(ordered)

    # No baseline file supplied: delegate directly with the given score dictionaries.
    if not run_b_path:
        return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)

    baseline_scores = _score_run_file(run_b_path)
    advanced_scores = _score_run_file(run_a_path) if run_a_path else None
    return self._ttest(run_b_score=baseline_scores, run_a_score=advanced_scores, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """
    def __init__(self, **kwargs):
        """
        Builds the base Evaluator, then additionally loads the qrels of the *new*
        test collection (``qrel_rpl_path``) that the replicated runs are scored against.
        """
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                # Separate relevance evaluator for the replication collection.
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.

        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs first (handled by the base class)...
        super(RplEvaluator, self).evaluate()
        # ...then score the replicated runs against the replication qrels.
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.

        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
                           if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
                           if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if run_b_path:
            if run_a_path:
                # Both run files supplied: score each against the replication qrels.
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
                    # rpd=False selects the un-paired variant in the shared helper.
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
nrmse
|
Determines the normalized Root Mean Square Error (RMSE).
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with nRMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.
    """
    def __init__(self, **kwargs):
        """
        Loads qrels and up to four runs (original/reproduced x baseline/advanced)
        from the given file paths. Every argument is optional; anything not
        supplied stays ``None`` until populated later.
        """
        # File paths (all optional keyword arguments).
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs (topic -> {doc_id: score}).
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        # Evaluated scores (topic -> {measure: value}), filled by evaluate().
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        # Each run is parsed and then re-built with topics in sorted order.
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}

    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.
        Ties are broken before trimming; trim() mutates the run in place.

        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            if t:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            if t:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            if t:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            if t:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)

    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.
        NOTE(review): the ``run`` parameter is unused here; subclasses override
        evaluate() and give it meaning.

        @param run: Reproduced or replicated run that will be evaluated.
        """
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)

    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # Replication subclasses carry rel_eval_rpl; reproduction uses the original qrels.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.

        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)

    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.

        @param rpd: Boolean indicating if the evaluated runs are reproduced.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            # Explicitly passed scores take precedence over the stored ones.
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.
    """
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.

        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        # Score the original runs via the base class, then the reproduced runs
        # against the same (original) qrels.
        super(RpdEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    Input precedence: run file paths, then parsed runs passed as arguments,
    then the runs stored on the evaluator.

    @param run_b_rep: Parsed baseline run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_a_rep: Parsed advanced run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
    """
    # Tier 1: parse runs from the supplied file paths.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    # Tier 2: use the parsed runs passed by the caller.
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
    # Tier 3: fall back to the runs stored on the evaluator.
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                    'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Kendall's tau Union (KTU) for baseline run.")
            return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
    else:
        print(ERR_MSG)
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
    """
    Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
    according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    Input precedence: run file paths, then parsed runs passed as arguments,
    then the runs stored on the evaluator.

    @param run_b_rep: Parsed baseline run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_a_rep: Parsed advanced run,
                      if not provided the run of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
                    See also: https://github.com/claclark/Compatibility
    @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
    """
    # Tier 1: parse runs from the supplied file paths.
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    # Tier 2: use the parsed runs passed by the caller.
    if self.run_b_orig and run_b_rep:
        if self.run_a_orig and run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    # Tier 3: fall back to the runs stored on the evaluator.
    if self.run_b_orig and self.run_b_rep:
        if self.run_a_orig and self.run_a_rep:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                    'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            if print_feedback:
                print("Determining Rank-biased Overlap (RBO) for baseline run.")
            return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
    else:
        print(ERR_MSG)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the Root Mean Square Error (RMSE) according to the following paper:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    Input precedence: run file paths, then pre-computed score dictionaries,
    then the scores stored on the evaluator.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with RMSE values that measure the closeness
             between the topics scores of the original and reproduced runs.
    """
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        print(ERR_MSG)

# Restored implementation: this method had been replaced by a "# MASKED: nrmse
# function" placeholder; without it RpdEvaluator lost its normalized-RMSE API.
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Determines the normalized Root Mean Square Error (nRMSE).

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with nRMSE values that measure the closeness
             between the topics scores of the original and reproduced runs.
    """
    if self.run_b_orig and run_b_path:
        if self.run_a_orig and run_a_path:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
    if self.run_b_orig_score and run_b_score:
        if self.run_a_orig_score and run_a_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
    if self.run_b_orig_score and self.run_b_rep_score:
        if self.run_a_orig_score and self.run_a_rep_score:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                    'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
        else:
            if print_feedback:
                print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
            return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
    else:
        print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
    """
    Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
    as in the original experiment.

    @param run_b_score: Scores of the baseline run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_a_score: Scores of the advanced run,
                        if not provided the scores of the RpdEvaluator object will be used instead.
    @param run_b_path: Path to another reproduced baseline run,
                       if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
    @param run_a_path: Path to another reproduced advanced run,
                       if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
    @param print_feedback: Boolean value indicating if feedback on progress should be printed.
    @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
    """
    if run_b_path:
        if run_a_path:
            # Both run files supplied: parse, order topics, and score both
            # against the original qrels before delegating to the shared helper.
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
        else:
            with open(run_b_path, 'r') as b_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
    # No file paths given: forward the (possibly None) score dictionaries.
    return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
"""
The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
derived from a test collection not used in the original experiment.
"""
def __init__(self, **kwargs):
super(RplEvaluator, self).__init__(**kwargs)
# Replicability needs a second qrels file (the new test collection);
# replicated runs are scored with rel_eval_rpl, originals with rel_eval.
self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
if self.qrel_rpl_path:
with open(self.qrel_rpl_path, 'r') as f_qrel:
qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
def evaluate(self, run=None):
"""
Evaluates the scores of the original and replicated baseline and advanced runs.
If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
"""
if run:
return self.rel_eval_rpl.evaluate(run)
# Score the originals via the base class, then the replicated runs
# against the replicability qrels.
super(RplEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
not used in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another replicated baseline run,
if not provided the replicated baseline run of the RplEvaluator object will be used instead.
@param run_a_path: Path to another replicated advanced run,
if not provided the replicated advanced run of the RplEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
# Same dispatch as the reproducibility variant, but scores come from the
# replicability qrels and _ttest is told rpd=False (unpaired test).
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the normalized Root Mean Square Error (RMSE).
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with nRMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
| 447
| 506
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
"""
An abstract evaluator that holds the original baseline and advanced run as well as
the reproduced/replicated baseline and advanced run.
"""
def __init__(self, **kwargs):
# All inputs are optional; a run/qrels that is not provided stays None and
# the measure methods fall through to printing ERR_MSG.
self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
self.run_b_orig = None
self.run_a_orig = None
self.run_b_rep = None
self.run_a_rep = None
self.run_b_orig_score = None
self.run_a_orig_score = None
self.run_b_rep_score = None
self.run_a_rep_score = None
if self.qrel_orig_path:
with open(self.qrel_orig_path, 'r') as f_qrel:
qrel_orig = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
if self.run_b_orig_path:
with open(self.run_b_orig_path, 'r') as f_run:
self.run_b_orig = pytrec_eval.parse_run(f_run)
# Keep topics sorted so per-topic comparisons are deterministic.
self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
if self.run_a_orig_path:
with open(self.run_a_orig_path, 'r') as f_run:
self.run_a_orig = pytrec_eval.parse_run(f_run)
self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
if self.run_b_rep_path:
with open(self.run_b_rep_path, 'r') as f_run:
self.run_b_rep = pytrec_eval.parse_run(f_run)
self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
if self.run_a_rep_path:
with open(self.run_a_rep_path, 'r') as f_run:
self.run_a_rep = pytrec_eval.parse_run(f_run)
self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
def trim(self, t=None, run=None):
"""
Trims all runs of the Evaluator to the length specified by the threshold value t.
@param t: Threshold parameter or number of top-k documents to be considered.
@param run: If run is not None, only the provided run will be trimmed.
"""
# Ties are broken before trimming so the cut-off is reproducible.
# trim() mutates the run dict in place (no reassignment needed).
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
def evaluate(self, run=None):
"""
Evaluates the original baseline and advanced run if available.
Subclasses extend this to also score the reproduced/replicated runs.
@param run: Reproduced or replicated run that will be evaluated.
(Ignored here; only used by subclass overrides.)
"""
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the Effect Ratio (ER) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The ER value is determined by the ratio between the mean improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the ER values for the specified run combination.
"""
if print_feedback:
print('Determining Effect Ratio (ER)')
if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
# RplEvaluator instances carry rel_eval_rpl (replicability qrels);
# plain Rpd/Evaluator instances fall back to the original qrels.
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The DeltaRI value is determined by the difference between the relative improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the DRI values for the specified run combination.
"""
if print_feedback:
print('Determining Delta Relative Improvement (DRI)')
if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
"""
Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param rpd: Boolean indicating if the evaluated runs are reproduced.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
# Explicitly passed scores win over the scores stored on the evaluator.
if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
if run_b_score and run_a_score:
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
if run_b_score:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
else:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
else:
print(ERR_MSG)
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
def evaluate(self, run=None):
"""
Evaluates the scores of the original and reproduced baseline and advanced runs.
If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A reproduced run. If not specified, the original and reproduced runs of the the RpdEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
"""
if run:
return self.rel_eval.evaluate(run)
super(RpdEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_rep: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_rep: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if self.run_b_orig and run_b_rep:
if self.run_a_orig and run_a_rep:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if self.run_b_orig and self.run_b_rep:
if self.run_a_orig and self.run_a_rep:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
else:
print(ERR_MSG)
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
"""
Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_rep: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_rep: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
See also: https://github.com/claclark/Compatibility
@return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if self.run_b_orig and run_b_rep:
if self.run_a_orig and run_a_rep:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if self.run_b_orig and self.run_b_rep:
if self.run_a_orig and self.run_a_rep:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
else:
print(ERR_MSG)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the Root Mean Square Error (RMSE) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with RMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the normalized Root Mean Square Error (RMSE).
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with nRMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
as in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.

    On top of the base Evaluator state it builds a second relevance evaluator
    (``rel_eval_rpl``) from the qrels of the replication test collection, which
    is used for scoring the replicated runs.
    """

    def __init__(self, **kwargs):
        """
        @param kwargs: Same keyword arguments as Evaluator, plus 'qrel_rpl_path' -
                       the path to the qrels of the replication test collection.
        """
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as qrel_file:
                judgments = pytrec_eval.parse_qrel(qrel_file)
            self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(judgments, pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs against the original qrels first, then score
        # each replicated run (if present) against the replication qrels.
        super(RplEvaluator, self).evaluate()
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            rep_run = getattr(self, run_attr)
            if rep_run:
                rep_run = break_ties(rep_run)
                setattr(self, run_attr, rep_run)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(rep_run))

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
               if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
               if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        def _score_run_file(path):
            # Parse a run file, order its topics deterministically, and score it
            # against the replication qrels.
            with open(path, 'r') as run_file:
                parsed = pytrec_eval.parse_run(run_file)
            ordered = {topic: parsed[topic] for topic in sorted(parsed)}
            return self.rel_eval_rpl.evaluate(ordered)

        if run_b_path:
            # Freshly scored file paths take precedence over score arguments.
            baseline_scores = _score_run_file(run_b_path)
            advanced_scores = _score_run_file(run_a_path) if run_a_path else None
            return self._ttest(rpd=False, run_b_score=baseline_scores,
                               run_a_score=advanced_scores, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score,
                           print_feedback=print_feedback)
|
evaluate
|
Evaluates the scores of the original and replicated baseline and advanced runs.
If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.
    """
    def __init__(self, **kwargs):
        """
        Parses the qrels and up to four runs (original/reproduced x baseline/advanced)
        from the given file paths. Every input is optional; runs that are not provided
        stay None, and the derived measures print ERR_MSG when they cannot be computed.
        @param kwargs: Optional file paths: qrel_orig_path, run_b_orig_path,
                       run_a_orig_path, run_b_rep_path, run_a_rep_path.
        """
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs and their per-topic evaluation results (filled lazily).
        self.run_b_orig = None
        self.run_a_orig = None
        self.run_b_rep = None
        self.run_a_rep = None
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
        if self.run_b_orig_path:
            with open(self.run_b_orig_path, 'r') as f_run:
                self.run_b_orig = pytrec_eval.parse_run(f_run)
                # Re-insert topics in sorted order for a deterministic iteration order.
                self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
        if self.run_a_orig_path:
            with open(self.run_a_orig_path, 'r') as f_run:
                self.run_a_orig = pytrec_eval.parse_run(f_run)
                self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
        if self.run_b_rep_path:
            with open(self.run_b_rep_path, 'r') as f_run:
                self.run_b_rep = pytrec_eval.parse_run(f_run)
                self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
        if self.run_a_rep_path:
            with open(self.run_a_rep_path, 'r') as f_run:
                self.run_a_rep = pytrec_eval.parse_run(f_run)
                self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
    def trim(self, t=None, run=None):
        """
        Trims all runs of the Evaluator to the length specified by the threshold value t.
        @param t: Threshold parameter or number of top-k documents to be considered.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            # NOTE(review): break_ties may return a new dict; in that case the
            # caller's original run object is not trimmed in place - verify intended.
            run = break_ties(run)
            if t:
                trim(run, thresh=t)
            else:
                trim(run)
            return
        # Otherwise trim every loaded run; ties are broken first so that the
        # cut-off is deterministic.
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            if t:
                trim(self.run_b_orig, thresh=t)
            else:
                trim(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            if t:
                trim(self.run_a_orig, thresh=t)
            else:
                trim(self.run_a_orig)
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            if t:
                trim(self.run_b_rep, thresh=t)
            else:
                trim(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            if t:
                trim(self.run_a_rep, thresh=t)
            else:
                trim(self.run_a_rep)
    def evaluate(self, run=None):
        """
        Evaluates the original baseline and advanced run if available.
        @param run: Reproduced or replicated run that will be evaluated.
        """
        # NOTE(review): `run` is unused in this base implementation; subclasses
        # override evaluate() and make use of it.
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Effect Ratio (ER) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is determined by the ratio between the mean improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        # Precedence: run file paths > score arguments > scores stored on the object.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                # RplEvaluator defines rel_eval_rpl; fall back to the original qrels otherwise.
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                          rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)
    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The DeltaRI value is determined by the difference between the relative improvements
        of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        # Same argument precedence as er(): file paths > score arguments > stored scores.
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                run_b_rep = pytrec_eval.parse_run(b_run)
                run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
                run_a_rep = pytrec_eval.parse_run(a_run)
                run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
                return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                               rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        else:
            print(ERR_MSG)
    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param rpd: Boolean indicating if the evaluated runs are reproduced.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                # NOTE(review): this branch reads self.run_a_orig_score without a
                # None-check; callers must have evaluated the advanced original run.
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            else:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        else:
            print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
    derived from the same test collection used in the original experiment.
    """
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        # Score the original runs first, then the reproduced ones against the same qrels.
        super(RpdEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
        """
        # Precedence: run file paths > run arguments > runs stored on the object.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                            'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
                        'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Kendall's tau Union (KTU) for baseline run.")
                return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
        according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_rep: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
               See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
        """
        # Precedence: run file paths > run arguments > runs stored on the object.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                            'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
                        'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
            else:
                if print_feedback:
                    print("Determining Rank-biased Overlap (RBO) for baseline run.")
                return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
        else:
            print(ERR_MSG)
    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the Root Mean Square Error (RMSE) according to the following paper:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        # Precedence: run file paths > score arguments > scores stored on the object.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determines the normalized Root Mean Square Error (RMSE).
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with nRMSE values that measure the closeness
                 between the topics scores of the original and reproduced runs.
        """
        # Precedence: run file paths > score arguments > scores stored on the object.
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
                            'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    # NOTE(review): message omits 'normalized', unlike the other
                    # feedback strings in this method - left unchanged (runtime string).
                    print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
                        'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
            else:
                if print_feedback:
                    print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
                return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
        else:
            print(ERR_MSG)
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
        as in the original experiment.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced baseline run,
               if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_path: Path to another reproduced advanced run,
               if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Scores computed from file paths take precedence over score arguments.
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
                    return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
    derived from a test collection not used in the original experiment.
    """
    def __init__(self, **kwargs):
        """
        @param kwargs: Same keyword arguments as Evaluator, plus 'qrel_rpl_path' -
                       the path to the qrels of the replication test collection.
        """
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            with open(self.qrel_rpl_path, 'r') as f_qrel:
                qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
    def evaluate(self, run=None):
        """
        Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
        scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
                    be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the original runs against the original qrels ...
        super(RplEvaluator, self).evaluate()
        # ... and the replicated runs against the replication qrels.
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
        not used in the original experiment.
        @param run_b_score: Scores of the baseline run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
               if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another replicated baseline run,
               if not provided the replicated baseline run of the RplEvaluator object will be used instead.
        @param run_a_path: Path to another replicated advanced run,
               if not provided the replicated advanced run of the RplEvaluator object will be used instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
        """
        # Scores computed from file paths take precedence over score arguments.
        if run_b_path:
            if run_a_path:
                with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    run_a_rep = pytrec_eval.parse_run(a_run)
                    run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
                    run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
            else:
                with open(run_b_path, 'r') as b_run:
                    run_b_rep = pytrec_eval.parse_run(b_run)
                    run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
                    run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
                    return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
def evaluate(self, run=None):
"""
Evaluates the scores of the original and replicated baseline and advanced runs.
If a (replicated) run is provided only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A replicated run. If not specified, the original and replicated runs of the the RplEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
"""
if run:
return self.rel_eval_rpl.evaluate(run)
super(RplEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
| 558
| 576
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
    """
    An abstract evaluator that holds the original baseline and advanced run as well as
    the reproduced/replicated baseline and advanced run.

    Runs are parsed TREC run dictionaries (topic -> {doc_id: score}); score attributes
    are per-topic score dictionaries filled in by evaluate().
    """
    def __init__(self, **kwargs):
        """
        @param qrel_orig_path: Path to the original qrels file.
        @param run_b_orig_path: Path to the original baseline run.
        @param run_a_orig_path: Path to the original advanced run.
        @param run_b_rep_path: Path to the reproduced/replicated baseline run.
        @param run_a_rep_path: Path to the reproduced/replicated advanced run.
        """
        self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
        self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
        self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
        self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
        self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
        # Parsed runs; stay None when no path was supplied.
        self.run_b_orig = self._load_run(self.run_b_orig_path)
        self.run_a_orig = self._load_run(self.run_a_orig_path)
        self.run_b_rep = self._load_run(self.run_b_rep_path)
        self.run_a_rep = self._load_run(self.run_a_rep_path)
        # Per-topic score dictionaries, filled by evaluate().
        self.run_b_orig_score = None
        self.run_a_orig_score = None
        self.run_b_rep_score = None
        self.run_a_rep_score = None
        if self.qrel_orig_path:
            with open(self.qrel_orig_path, 'r') as f_qrel:
                qrel_orig = pytrec_eval.parse_qrel(f_qrel)
            # rel_eval only exists when qrels were given (other code probes via hasattr).
            self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)

    @staticmethod
    def _load_run(path):
        """Parse a run file and return it with topics sorted; None if no path is given."""
        if not path:
            return None
        with open(path, 'r') as f_run:
            run = pytrec_eval.parse_run(f_run)
        return {topic: run[topic] for topic in sorted(run)}

    def _load_and_score(self, path):
        """Parse a run file and score it; replicated qrels are preferred when present."""
        run = self._load_run(path)
        if hasattr(self, 'rel_eval_rpl'):
            return self.rel_eval_rpl.evaluate(run)
        return self.rel_eval.evaluate(run)

    @staticmethod
    def _trim_run(run, t):
        """Break score ties deterministically and cut the run to the top-t documents."""
        run = break_ties(run)
        if t:
            trim(run, thresh=t)
        else:
            trim(run)
        return run

    def trim(self, t=None, run=None):
        """
        Trim all runs of the Evaluator to the length specified by the threshold value t.
        @param t: Threshold parameter or number of top-k documents to be considered;
                  the library default cut-off is used when omitted.
        @param run: If run is not None, only the provided run will be trimmed.
        """
        if run:
            # NOTE(review): trimming happens on the object returned by break_ties; the
            # caller only observes the effect if break_ties returns/mutates the same
            # mapping — confirm intended (behavior kept identical to the original).
            self._trim_run(run, t)
            return
        if self.run_b_orig:
            self.run_b_orig = self._trim_run(self.run_b_orig, t)
        if self.run_a_orig:
            self.run_a_orig = self._trim_run(self.run_a_orig, t)
        if self.run_b_rep:
            self.run_b_rep = self._trim_run(self.run_b_rep, t)
        if self.run_a_rep:
            self.run_a_rep = self._trim_run(self.run_a_rep, t)

    def evaluate(self, run=None):
        """
        Evaluate the original baseline and advanced runs if available.
        @param run: Optional run to score against the original qrels; if given, its score
                    dictionary is returned and the held runs are left untouched.
        @return: Score dictionary when run is given, otherwise None.
        """
        if run:
            # Bug fix: run was documented but previously ignored by the base class;
            # subclasses already honor it in exactly this way.
            return self.rel_eval.evaluate(run)
        if self.run_b_orig:
            self.run_b_orig = break_ties(self.run_b_orig)
            self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
        if self.run_a_orig:
            self.run_a_orig = break_ties(self.run_a_orig)
            self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)

    def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determine the Effect Ratio (ER) according to:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
        Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The ER value is the ratio between the mean improvements of the original and
        reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run; falls back to the held scores.
        @param run_a_score: Scores of the advanced run; falls back to the held scores.
        @param run_b_path: Path to another baseline run to score and use instead.
        @param run_a_path: Path to another advanced run to score and use instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the ER values for the specified run combination.
        """
        if print_feedback:
            print('Determining Effect Ratio (ER)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            rep_score_b = self._load_and_score(run_b_path)
            rep_score_a = self._load_and_score(run_a_path)
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=rep_score_b, rep_score_a=rep_score_a, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                      rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        print(ERR_MSG)

    def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determine the Delta Relative Improvement (DeltaRI) according to:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
        Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        The DeltaRI value is the difference between the relative improvements of the
        original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run; falls back to the held scores.
        @param run_a_score: Scores of the advanced run; falls back to the held scores.
        @param run_b_path: Path to another baseline run to score and use instead.
        @param run_a_path: Path to another advanced run to score and use instead.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
        """
        if print_feedback:
            print('Determining Delta Relative Improvement (DRI)')
        if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
            rep_score_b = self._load_and_score(run_b_path)
            rep_score_a = self._load_and_score(run_a_path)
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=rep_score_b, rep_score_a=rep_score_a, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
        if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
            return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
                           rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
        print(ERR_MSG)

    def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
        """
        Conduct either a paired (reproducibility) or unpaired (replicability) two-sided
        t-test according to:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
        Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param rpd: True for reproduced runs (paired test), False for replicated runs (unpaired).
        @param run_b_score: Scores of the baseline run; falls back to the held scores.
        @param run_a_score: Scores of the advanced run; falls back to the held scores.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the
                 baseline and advanced run.
        """
        if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
            # Explicitly provided scores take precedence over the held scores.
            if run_b_score and run_a_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
            if run_b_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline run.')
                return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining p-values of t-test for baseline and advanced run.')
                return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
                        'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
            if print_feedback:
                print('Determining p-values of t-test for baseline run.')
            return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
        print(ERR_MSG)
class RpdEvaluator(Evaluator):
    """
    The Reproducibility Evaluator is used for quantifying the different levels of
    reproduction for runs that were derived from the same test collection used in the
    original experiment.

    Every public measure resolves the reproduced run(s) to compare in the same order:
    1. explicit file paths (run_b_path/run_a_path),
    2. explicit runs/scores passed as arguments,
    3. the reproduced runs/scores held by this evaluator.
    The previously copy-pasted branching is factored into _ordering_measure (document
    orderings: KTU, RBO) and _score_measure (topic scores: RMSE, nRMSE).
    """
    @staticmethod
    def _parse_run_file(path):
        """Parse a TREC run file and return it with topics in sorted order."""
        with open(path, 'r') as f_run:
            run = pytrec_eval.parse_run(f_run)
        return {topic: run[topic] for topic in sorted(run)}

    def _score_run_file(self, path):
        """Parse a run file and score it against the original qrels."""
        return self.rel_eval.evaluate(self._parse_run_file(path))

    def evaluate(self, run=None):
        """
        Evaluate the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided only this one will be evaluated and a
        dictionary with the corresponding scores is returned.
        @param run: A reproduced run. If not specified, the original and reproduced runs
                    of the RpdEvaluator will be used instead.
        @return: If run is specified, a dictionary with the corresponding scores is returned.
        """
        if run:
            return self.rel_eval.evaluate(run)
        super(RpdEvaluator, self).evaluate()
        if self.run_b_rep:
            self.run_b_rep = break_ties(self.run_b_rep)
            self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
        if self.run_a_rep:
            self.run_a_rep = break_ties(self.run_a_rep)
            self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)

    def _ordering_measure(self, measure, name, run_b_rep, run_a_rep, run_b_path, run_a_path, print_feedback):
        """
        Shared driver for document-ordering measures (KTU, RBO): resolve which
        reproduced run(s) to use and apply ``measure`` pairwise against the originals.
        Prints ERR_MSG and returns None when no usable combination is available.
        """
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print('Determining {} for baseline and advanced run.'.format(name))
                return {'baseline': measure(self.run_b_orig, self._parse_run_file(run_b_path)),
                        'advanced': measure(self.run_a_orig, self._parse_run_file(run_a_path))}
            if print_feedback:
                print('Determining {} for baseline run.'.format(name))
            return {'baseline': measure(self.run_b_orig, self._parse_run_file(run_b_path))}
        if self.run_b_orig and run_b_rep:
            if self.run_a_orig and run_a_rep:
                if print_feedback:
                    print('Determining {} for baseline and advanced run.'.format(name))
                return {'baseline': measure(self.run_b_orig, run_b_rep),
                        'advanced': measure(self.run_a_orig, run_a_rep)}
            if print_feedback:
                print('Determining {} for baseline run.'.format(name))
            return {'baseline': measure(self.run_b_orig, run_b_rep)}
        if self.run_b_orig and self.run_b_rep:
            if self.run_a_orig and self.run_a_rep:
                if print_feedback:
                    print('Determining {} for baseline and advanced run.'.format(name))
                return {'baseline': measure(self.run_b_orig, self.run_b_rep),
                        'advanced': measure(self.run_a_orig, self.run_a_rep)}
            if print_feedback:
                print('Determining {} for baseline run.'.format(name))
            return {'baseline': measure(self.run_b_orig, self.run_b_rep)}
        print(ERR_MSG)

    def _score_measure(self, measure, name, run_b_score, run_a_score, run_b_path, run_a_path, print_feedback):
        """
        Shared driver for score-based measures (RMSE, nRMSE): resolve which reproduced
        scores to use and apply ``measure`` against the original topic scores.
        Prints ERR_MSG and returns None when no usable combination is available.
        """
        if self.run_b_orig and run_b_path:
            if self.run_a_orig and run_a_path:
                if print_feedback:
                    print('Determining {} for baseline and advanced run.'.format(name))
                return {'baseline': measure(self.run_b_orig_score, self._score_run_file(run_b_path)),
                        'advanced': measure(self.run_a_orig_score, self._score_run_file(run_a_path))}
            if print_feedback:
                print('Determining {} for baseline run.'.format(name))
            return {'baseline': measure(self.run_b_orig_score, self._score_run_file(run_b_path))}
        if self.run_b_orig_score and run_b_score:
            if self.run_a_orig_score and run_a_score:
                if print_feedback:
                    print('Determining {} for baseline and advanced run.'.format(name))
                return {'baseline': measure(self.run_b_orig_score, run_b_score),
                        'advanced': measure(self.run_a_orig_score, run_a_score)}
            if print_feedback:
                print('Determining {} for baseline run.'.format(name))
            return {'baseline': measure(self.run_b_orig_score, run_b_score)}
        if self.run_b_orig_score and self.run_b_rep_score:
            if self.run_a_orig_score and self.run_a_rep_score:
                if print_feedback:
                    print('Determining {} for baseline and advanced run.'.format(name))
                return {'baseline': measure(self.run_b_orig_score, self.run_b_rep_score),
                        'advanced': measure(self.run_a_orig_score, self.run_a_rep_score)}
            if print_feedback:
                print('Determining {} for baseline run.'.format(name))
            return {'baseline': measure(self.run_b_orig_score, self.run_b_rep_score)}
        print(ERR_MSG)

    def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determine Kendall's tau Union (KTU) between the original and reproduced document
        orderings according to:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
        Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Reproduced baseline run; falls back to the held run.
        @param run_a_rep: Reproduced advanced run; falls back to the held run.
        @param run_b_path: Path to another reproduced baseline run.
        @param run_a_path: Path to another reproduced advanced run.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with KTU values comparing original and reproduced orderings.
        """
        def measure(orig, rep):
            return ktu(orig, rep, pbar=print_feedback)
        return self._ordering_measure(measure, "Kendall's tau Union (KTU)",
                                      run_b_rep, run_a_rep, run_b_path, run_a_path, print_feedback)

    def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
        """
        Determine the Rank-Biased Overlap (RBO) between the original and reproduced
        document orderings according to:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
        Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Reproduced baseline run; falls back to the held run.
        @param run_a_rep: Reproduced advanced run; falls back to the held run.
        @param run_b_path: Path to another reproduced baseline run.
        @param run_a_path: Path to another reproduced advanced run.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @param misinfo: Use the RBO implementation that is also used in the TREC Health
                        Misinformation Track. See also: https://github.com/claclark/Compatibility
        @return: Dictionary with RBO values comparing original and reproduced orderings.
        """
        def measure(orig, rep):
            return RBO(orig, rep, pbar=print_feedback, misinfo=misinfo)
        return self._ordering_measure(measure, 'Rank-biased Overlap (RBO)',
                                      run_b_rep, run_a_rep, run_b_path, run_a_path, print_feedback)

    def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determine the Root Mean Square Error (RMSE) according to:
        Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
        Philipp Schaer, Ian Soboroff.
        How to Measure the Reproducibility of System-oriented IR Experiments.
        Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_score: Scores of the baseline run; falls back to the held scores.
        @param run_a_score: Scores of the advanced run; falls back to the held scores.
        @param run_b_path: Path to another reproduced baseline run.
        @param run_a_path: Path to another reproduced advanced run.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with RMSE values measuring the closeness between the topic
                 scores of the original and reproduced runs.
        """
        def measure(orig_score, rep_score):
            return RMSE(orig_score, rep_score, pbar=print_feedback)
        return self._score_measure(measure, 'Root Mean Square Error (RMSE)',
                                   run_b_score, run_a_score, run_b_path, run_a_path, print_feedback)

    def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Determine the normalized Root Mean Square Error (nRMSE).
        @param run_b_score: Scores of the baseline run; falls back to the held scores.
        @param run_a_score: Scores of the advanced run; falls back to the held scores.
        @param run_b_path: Path to another reproduced baseline run.
        @param run_a_path: Path to another reproduced advanced run.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with nRMSE values measuring the closeness between the topic
                 scores of the original and reproduced runs.
        """
        # Bug fix: the held-scores branch previously printed the non-normalized
        # "Root Mean Square Error (RMSE)" message; all branches now say "normalized".
        def measure(orig_score, rep_score):
            return nRMSE(orig_score, rep_score, pbar=print_feedback)
        return self._score_measure(measure, 'normalized Root Mean Square Error (RMSE)',
                                   run_b_score, run_a_score, run_b_path, run_a_path, print_feedback)

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conduct a paired two-tailed t-test for reproduced runs that were derived from the
        same test collection as in the original experiment.
        @param run_b_score: Scores of the baseline run; falls back to the held scores.
        @param run_a_score: Scores of the advanced run; falls back to the held scores.
        @param run_b_path: Path to another reproduced baseline run.
        @param run_a_path: Path to another reproduced advanced run.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary with p-values that compare the score distributions of the
                 baseline and advanced run.
        """
        if run_b_path:
            b_score = self._score_run_file(run_b_path)
            a_score = self._score_run_file(run_a_path) if run_a_path else None
            return self._ttest(run_b_score=b_score, run_a_score=a_score, print_feedback=print_feedback)
        return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
    """
    The Replicability Evaluator quantifies the different levels of replication for runs
    that were derived from a test collection not used in the original experiment.
    Replicated runs are therefore scored against a second, replicated qrels file.
    """
    def __init__(self, **kwargs):
        super(RplEvaluator, self).__init__(**kwargs)
        self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
        if self.qrel_rpl_path:
            # Build a dedicated relevance evaluator for the replicated collection.
            with open(self.qrel_rpl_path, 'r') as qrel_file:
                parsed_qrels = pytrec_eval.parse_qrel(qrel_file)
                self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(parsed_qrels,
                                                                   pytrec_eval.supported_measures)

    def evaluate(self, run=None):
        """
        Evaluate the original and replicated baseline/advanced runs.
        If a (replicated) run is provided, only that run is evaluated and its score
        dictionary is returned.
        @param run: Optional replicated run to evaluate on its own.
        @return: Score dictionary when run is given, otherwise None.
        """
        if run:
            return self.rel_eval_rpl.evaluate(run)
        # Score the originals via the base class, then the replicated runs against
        # the replicated qrels.
        super(RplEvaluator, self).evaluate()
        for run_attr, score_attr in (('run_b_rep', 'run_b_rep_score'),
                                     ('run_a_rep', 'run_a_rep_score')):
            rep_run = getattr(self, run_attr)
            if rep_run:
                rep_run = break_ties(rep_run)
                setattr(self, run_attr, rep_run)
                setattr(self, score_attr, self.rel_eval_rpl.evaluate(rep_run))

    def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
        """
        Conduct an unpaired two-tailed t-test for replicated runs that were derived from
        a test collection not used in the original experiment.
        @param run_b_score: Scores of the baseline run; falls back to the held scores.
        @param run_a_score: Scores of the advanced run; falls back to the held scores.
        @param run_b_path: Path to another replicated baseline run.
        @param run_a_path: Path to another replicated advanced run.
        @param print_feedback: If True, progress feedback is printed.
        @return: Dictionary with p-values that compare the score distributions of the
                 baseline and advanced run.
        """
        def score_from(path):
            # Parse a run file, order its topics, and score it against the replicated qrels.
            with open(path, 'r') as run_file:
                parsed = pytrec_eval.parse_run(run_file)
            return self.rel_eval_rpl.evaluate({topic: parsed[topic] for topic in sorted(parsed)})

        if run_b_path:
            baseline_scores = score_from(run_b_path)
            advanced_scores = score_from(run_a_path) if run_a_path else None
            return self._ttest(rpd=False, run_b_score=baseline_scores,
                               run_a_score=advanced_scores, print_feedback=print_feedback)
        return self._ttest(rpd=False, run_b_score=run_b_score,
                           run_a_score=run_a_score, print_feedback=print_feedback)
|
_get_level_lengths
|
Given an index, find the level length for each element.
Optional argument is a list of index positions which
should not be visible.
Result is a dictionary of (level, initial_position): span
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    # Yield the matplotlib pyplot and colors modules when matplotlib is installed;
    # otherwise fail loudly, naming the feature (``func``) that requires it.
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, colors
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
  * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None, cell_ids=True):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option('display.precision')
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns = []
self.cell_ids = cell_ids
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if is_float(x):
return '{:>.{precision}g}'.format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
axes='index, columns', klass='Styler',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and
com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * (len(clabels[0]) - len(hidden_columns)))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and
not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns)}
# only add an id if the cell has a style
if (self.cell_ids or
not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ''
if 'class="' in table_attr:
table_attr = table_attr.replace('class="',
'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
    def render(self, **kwargs):
        """
        Render the built up styles to HTML.
        Parameters
        ----------
        `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to provide
        additional variables for a custom template.
        .. versionadded:: 0.20
        Returns
        -------
        rendered : str
            The rendered HTML
        Notes
        -----
        ``Styler`` objects have defined the ``_repr_html_`` method
        which automatically calls ``self.render()`` when it's the
        last item in a Notebook cell. When calling ``Styler.render()``
        directly, wrap the result in ``IPython.display.HTML`` to view
        the rendered HTML in the notebook.
        Pandas uses the following keys in render. Arguments passed
        in ``**kwargs`` take precedence, so think carefully if you want
        to override them:
        * head
        * cellstyle
        * body
        * uuid
        * precision
        * table_styles
        * caption
        * table_attributes
        """
        # run any queued .apply/.applymap style functions first
        self._compute()
        # TODO: namespace all the pandas keys
        d = self._translate()
        # filter out empty styles; every cell will have a class,
        # but the list of props may just be [['', '']],
        # hence the nested any()s below
        trimmed = [x for x in d['cellstyle']
                   if any(any(y) for y in x['props'])]
        d['cellstyle'] = trimmed
        d.update(kwargs)
        return self.template.render(**d)
def _update_ctx(self, attrs):
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
attrs : Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
    def __copy__(self):
        """
        Shallow copy: shares ``ctx`` and ``_todo`` with the original.
        """
        return self._copy(deepcopy=False)
    def __deepcopy__(self, memo):
        """
        Deep copy: ``ctx`` and ``_todo`` are deep-copied via ``_copy``.
        ``memo`` is required by the copy protocol but unused here.
        """
        return self._copy(deepcopy=True)
    def clear(self):
        """
        Reset the styler, removing any previously applied styles.
        Returns None.
        """
        self.ctx.clear()
        # rebinds rather than clearing in place; shallow copies made via
        # _copy() share the old _todo list and so keep their pending styles
        self._todo = []
    def _compute(self):
        """
        Execute the style functions built up in ``self._todo``.

        Relies on the convention that all style functions go through
        ``.apply`` or ``.applymap``, which append entries as tuples of
        (application method, args, kwargs).
        """
        r = self
        # each entry dispatches on ``self`` (not ``r``); the returned bound
        # method mutates self.ctx and returns self
        for func, args, kwargs in self._todo:
            r = func(self)(*args, **kwargs)
        return r
def _apply(self, func, axis=0, subset=None, **kwargs):
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis,
result_type='expand', **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
"Function {func!r} must return a DataFrame when "
"passed to `Styler.apply` with axis=None"
.format(func=func))
if not (result.index.equals(data.index) and
result.columns.equals(data.columns)):
msg = ('Result of {func!r} must have identical index and '
'columns as the input'.format(func=func))
raise ValueError(msg)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
msg = ("Function {func!r} returned the wrong shape.\n"
"Result has shape: {res}\n"
"Expected shape: {expect}".format(func=func,
res=result.shape,
expect=expected_shape))
raise ValueError(msg)
self._update_ctx(result)
return self
def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append((lambda instance: getattr(instance, '_apply'),
(func, axis, subset), kwargs))
return self
    def _applymap(self, func, subset=None, **kwargs):
        """
        Elementwise backend for ``applymap``: apply ``func`` over ``subset``
        and record the resulting CSS strings in ``self.ctx``.
        """
        # DataFrame.applymap only accepts a 1-arg callable, so bind any
        # extra keyword arguments into ``func`` up front
        func = partial(func, **kwargs)
        if subset is None:
            # default to the whole frame
            subset = pd.IndexSlice[:]
        subset = _non_reducing_slice(subset)
        result = self.data.loc[subset].applymap(func)
        self._update_ctx(result)
        return self
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
def where(self, cond, value, other=None, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
def set_precision(self, precision):
"""
Set the precision used to render.
Parameters
----------
precision : int
Returns
-------
self : Styler
"""
self.precision = precision
return self
def set_table_attributes(self, attributes):
"""
Set the table attributes.
These are the items that show up in the opening ``<table>`` tag
in addition to to automatic (by default) id.
Parameters
----------
attributes : string
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self):
"""
Export the styles to applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
Returns
-------
styles : list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
Parameters
----------
styles : list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
Parameters
----------
uuid : str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
def set_caption(self, caption):
"""
Set the caption on a Styler
Parameters
----------
caption : str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler.
These are placed in a ``<style>`` tag before the generated HTML table.
Parameters
----------
table_styles : list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
def hide_index(self):
"""
Hide any indices from rendering.
.. versionadded:: 0.23.0
Returns
-------
self : Styler
"""
self.hidden_index = True
return self
def hide_columns(self, subset):
"""
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
"""
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color : str
Returns
-------
self : Styler
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
subset=None, text_color_threshold=0.408):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap : str or colormap
matplotlib colormap
low, high : float
compress the range by these values.
axis : int or str
1 or 'columns' for columnwise, 0 or 'index' for rowwise
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
text_color_threshold : float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high,
text_color_threshold=text_color_threshold)
return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""
Color background in a range according to the data.
"""
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
)
def set_properties(self, subset=None, **kwargs):
"""
Convenience method for setting one or more non-data dependent
properties or each cell.
Parameters
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs : dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = ';'.join('{p}: {v}'.format(p=p, v=v)
for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
"""
Generate CSS code to draw a bar from start to end.
"""
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
)
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
align='left', vmin=None, vmax=None):
"""
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : int, str or None, default 0
Apply to each column (`axis=0` or `'index'`)
or to each row (`axis=1` or `'columns'`) or
to the entire DataFrame at once with `axis=None`.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 or 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero',' mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
"""
if align not in ('left', 'zero', 'mid'):
raise ValueError("`align` must be one of {'left', 'zero',' mid'}")
if not (is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
raise ValueError("`color` must be string or a list-like"
" of length 2: [`color_neg`, `color_pos`]"
" (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis,
align=align, colors=color, width=width,
vmin=vmin, vmax=vmax)
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
"""
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
"""
Highlight the minimum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
    def _highlight_handler(self, subset=None, color='yellow', axis=None,
                           max_=True):
        """
        Shared backend for ``highlight_max`` / ``highlight_min``.

        Restricts ``subset`` to numeric columns, then queues
        ``_highlight_extrema``; ``max_`` selects which extremum is shaded.
        """
        subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
        self.apply(self._highlight_extrema, color=color, axis=axis,
                   subset=subset, max_=max_)
        return self
    @staticmethod
    def _highlight_extrema(data, color='yellow', max_=True):
        """
        Highlight the min or max in a Series or DataFrame.
        """
        attr = 'background-color: {0}'.format(color)
        if data.ndim == 1:  # Series from .apply(axis=0 or 1)
            if max_:
                extrema = data == data.max()
            else:
                extrema = data == data.min()
            return [attr if v else '' for v in extrema]
        else:  # DataFrame from .apply(axis=None)
            if max_:
                extrema = data == data.max().max()
            else:
                extrema = data == data.min().min()
            return pd.DataFrame(np.where(extrema, attr, ''),
                                index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
"""
Factory function for creating a subclass of ``Styler``
with a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates
name : str
Name of your custom template to use for rendering
Returns
-------
MyStyler : subclass of Styler
Has the correct ``env`` and ``template`` class attributes set.
"""
loader = ChoiceLoader([
FileSystemLoader(searchpath),
cls.loader,
])
class MyStyler(cls):
env = Environment(loader=loader)
template = env.get_template(name)
return MyStyler
def pipe(self, func, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
.. versionadded:: 0.24.0
Parameters
----------
func : function
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
*args, **kwargs :
Arguments passed to `func`.
Returns
-------
object :
The value returned by ``func``.
See Also
--------
DataFrame.pipe : Analogous method for DataFrame.
Styler.apply : Apply a function row-wise, column-wise, or table-wise to
modify the dataframe's styling.
Notes
-----
Like :meth:`DataFrame.pipe`, this method can simplify the
application of several user-defined functions to a styler. Instead
of writing:
.. code-block:: python
f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)
users can write:
.. code-block:: python
(df.style.set_precision(3)
.pipe(g, arg1=a)
.pipe(f, arg2=b, arg3=c))
In particular, this allows users to define functions that take a
styler object, along with other parameters, and return the styler after
making styling changes (such as calling :meth:`Styler.apply` or
:meth:`Styler.set_properties`). Using ``.pipe``, these user-defined
style "transformations" can be interleaved with calls to the built-in
Styler interface.
Examples
--------
>>> def format_conversion(styler):
... return (styler.set_properties(**{'text-align': 'right'})
... .format({'conversion': '{:.1%}'}))
The user-defined ``format_conversion`` function above can be called
within a sequence of other style modifications:
>>> df = pd.DataFrame({'trial': list(range(5)),
... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
>>> (df.style
... .highlight_min(subset=['conversion'], color='yellow')
... .pipe(format_conversion)
... .set_caption("Results with minimum conversion highlighted."))
"""
return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
# MASKED: _get_level_lengths function (lines 1316-1356)
def _maybe_wrap_formatter(formatter):
    """
    Normalize ``formatter`` into a one-argument callable.

    Template strings are wrapped as ``formatter.format(x)``; callables pass
    through unchanged; anything else raises TypeError.
    """
    if is_string_like(formatter):
        return lambda x: formatter.format(x)
    if callable(formatter):
        return formatter
    raise TypeError("Expected a template string or callable, got {formatter} "
                    "instead".format(formatter=formatter))
|
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.
    Optional argument is a list of index positions which
    should not be visible.
    Result is a dictionary of (level, inital_position): span
    """
    # index.format replaces repeated labels with ``sentinel`` when
    # sparsifying, so a sentinel entry means "same label as the row above"
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)
    if hidden_elements is None:
        hidden_elements = []
    lengths = {}
    if index.nlevels == 1:
        # flat index: every visible position is its own span of 1
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths
    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                # sparsification disabled: no spans, each cell is length 1
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # a new visible label starts a span
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # sentinel (repeated label): extend the current span
                lengths[(i, last_label)] += 1
    # drop spans that ended up fully hidden (length 0)
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}
    return non_zero_lengths
| 1,316
| 1,356
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    # Guard helper: hand back (pyplot, colors) when matplotlib imported
    # successfully at module load; otherwise fail with a message naming
    # the API that required it.
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, colors
class Styler(object):
    """
    Helps style a DataFrame or Series according to the data with HTML and CSS.
    Parameters
    ----------
    data : Series or DataFrame
    precision : int
        precision to round floats to, defaults to pd.options.display.precision
    table_styles : list-like, default None
        list of {selector: (attr, value)} dicts; see Notes
    uuid : str, default None
        a unique identifier to avoid CSS collisions; generated automatically
    caption : str, default None
        caption to attach to the table
    cell_ids : bool, default True
        If True, each cell will have an ``id`` attribute in their HTML tag.
        The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
        where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
        number and ``<num_col>`` is the column number.
    Attributes
    ----------
    env : Jinja2 Environment
    template : Jinja2 Template
    loader : Jinja2 Loader
    See Also
    --------
    pandas.DataFrame.style
    Notes
    -----
    Most styling will be done by passing style functions into
    ``Styler.apply`` or ``Styler.applymap``. Style functions should
    return values with strings containing CSS ``'attr: value'`` that will
    be applied to the indicated cells.
    If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
    to automatically render itself. Otherwise call Styler.render to get
    the generated HTML.
    CSS classes are attached to the generated HTML
    * Index and Column names include ``index_name`` and ``level<k>``
    where `k` is its level in a MultiIndex
    * Index label cells include
    * ``row_heading``
    * ``row<n>`` where `n` is the numeric position of the row
    * ``level<k>`` where `k` is the level in a MultiIndex
    * Column label cells include
    * ``col_heading``
    * ``col<n>`` where `n` is the numeric position of the column
    * ``level<k>`` where `k` is the level in a MultiIndex
    * Blank cells include ``blank``
    * Data cells include ``data``
    """
    # Class-level Jinja2 machinery, shared by all instances.
    loader = PackageLoader("pandas", "io/formats/templates")
    env = Environment(
        loader=loader,
        trim_blocks=True,
    )
    template = env.get_template("html.tpl")
    def __init__(self, data, precision=None, table_styles=None, uuid=None,
                 caption=None, table_attributes=None, cell_ids=True):
        # ``ctx`` maps (row, col) -> list of 'prop: value' CSS strings;
        # ``_todo`` queues (func, args, kwargs) style calls applied lazily
        # by ``_compute`` at render time.
        self.ctx = defaultdict(list)
        self._todo = []
        if not isinstance(data, (pd.Series, pd.DataFrame)):
            raise TypeError("``data`` must be a Series or DataFrame")
        if data.ndim == 1:
            data = data.to_frame()
        if not data.index.is_unique or not data.columns.is_unique:
            raise ValueError("style is not supported for non-unique indices.")
        self.data = data
        self.index = data.index
        self.columns = data.columns
        self.uuid = uuid
        self.table_styles = table_styles
        self.caption = caption
        if precision is None:
            precision = get_option('display.precision')
        self.precision = precision
        self.table_attributes = table_attributes
        self.hidden_index = False
        self.hidden_columns = []
        self.cell_ids = cell_ids
        # display_funcs maps (row, col) -> formatting function
        def default_display_func(x):
            if is_float(x):
                return '{:>.{precision}g}'.format(x, precision=self.precision)
            else:
                return x
        self._display_funcs = defaultdict(lambda: default_display_func)
    def _repr_html_(self):
        """
        Hooks into Jupyter notebook rich display system.
        """
        return self.render()
    @Appender(_shared_docs['to_excel'] % dict(
        axes='index, columns', klass='Styler',
        axes_single_arg="{0 or 'index', 1 or 'columns'}",
        optional_by="""
            by : str or list of str
                Name or list of names which refer to the axis items.""",
        versionadded_to_excel='\n    .. versionadded:: 0.20'))
    def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
                 float_format=None, columns=None, header=True, index=True,
                 index_label=None, startrow=0, startcol=0, engine=None,
                 merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
                 freeze_panes=None):
        from pandas.io.formats.excel import ExcelFormatter
        formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                                   header=header,
                                   float_format=float_format, index=index,
                                   index_label=index_label,
                                   merge_cells=merge_cells,
                                   inf_rep=inf_rep)
        formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                        startcol=startcol, freeze_panes=freeze_panes,
                        engine=engine)
    def _translate(self):
        """
        Convert the DataFrame in `self.data` and the attrs from `_build_styles`
        into a dictionary of {head, body, uuid, cellstyle}.
        """
        table_styles = self.table_styles or []
        caption = self.caption
        ctx = self.ctx
        precision = self.precision
        hidden_index = self.hidden_index
        hidden_columns = self.hidden_columns
        uuid = self.uuid or str(uuid1()).replace("-", "_")
        ROW_HEADING_CLASS = "row_heading"
        COL_HEADING_CLASS = "col_heading"
        INDEX_NAME_CLASS = "index_name"
        DATA_CLASS = "data"
        BLANK_CLASS = "blank"
        BLANK_VALUE = ""
        def format_attr(pair):
            return "{key}={value}".format(**pair)
        # for sparsifying a MultiIndex
        # ``*_lengths`` keys are (level, position); values are the
        # rowspan/colspan each visible label should occupy.
        idx_lengths = _get_level_lengths(self.index)
        col_lengths = _get_level_lengths(self.columns, hidden_columns)
        cell_context = dict()
        n_rlvls = self.data.index.nlevels
        n_clvls = self.data.columns.nlevels
        rlabels = self.data.index.tolist()
        clabels = self.data.columns.tolist()
        # Normalize flat indexes to one-element label lists so the
        # MultiIndex code path below handles both shapes uniformly.
        if n_rlvls == 1:
            rlabels = [[x] for x in rlabels]
        if n_clvls == 1:
            clabels = [[x] for x in clabels]
        clabels = list(zip(*clabels))
        cellstyle = []
        head = []
        for r in range(n_clvls):
            # Blank for Index columns...
            row_es = [{"type": "th",
                       "value": BLANK_VALUE,
                       "display_value": BLANK_VALUE,
                       "is_visible": not hidden_index,
                       "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
            # ... except maybe the last for columns.names
            name = self.data.columns.names[r]
            cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=r)]
            name = BLANK_VALUE if name is None else name
            row_es.append({"type": "th",
                           "value": name,
                           "display_value": name,
                           "class": " ".join(cs),
                           "is_visible": not hidden_index})
            if clabels:
                for c, value in enumerate(clabels[r]):
                    cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                          "col{col}".format(col=c)]
                    cs.extend(cell_context.get(
                        "col_headings", {}).get(r, {}).get(c, []))
                    es = {
                        "type": "th",
                        "value": value,
                        "display_value": value,
                        "class": " ".join(cs),
                        "is_visible": _is_visible(c, r, col_lengths),
                    }
                    # Only emit a colspan attribute for spans wider than 1.
                    colspan = col_lengths.get((r, c), 0)
                    if colspan > 1:
                        es["attributes"] = [
                            format_attr({"key": "colspan", "value": colspan})
                        ]
                    row_es.append(es)
            head.append(row_es)
        if (self.data.index.names and
                com._any_not_none(*self.data.index.names) and
                not hidden_index):
            # Extra header row carrying the index names, padded with
            # blanks under the (visible) column labels.
            index_header_row = []
            for c, name in enumerate(self.data.index.names):
                cs = [INDEX_NAME_CLASS,
                      "level{lvl}".format(lvl=c)]
                name = '' if name is None else name
                index_header_row.append({"type": "th", "value": name,
                                         "class": " ".join(cs)})
            index_header_row.extend(
                [{"type": "th",
                  "value": BLANK_VALUE,
                  "class": " ".join([BLANK_CLASS])
                  }] * (len(clabels[0]) - len(hidden_columns)))
            head.append(index_header_row)
        body = []
        for r, idx in enumerate(self.data.index):
            row_es = []
            for c, value in enumerate(rlabels[r]):
                rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                       "row{row}".format(row=r)]
                es = {
                    "type": "th",
                    "is_visible": (_is_visible(r, c, idx_lengths) and
                                   not hidden_index),
                    "value": value,
                    "display_value": value,
                    "id": "_".join(rid[1:]),
                    "class": " ".join(rid)
                }
                rowspan = idx_lengths.get((c, r), 0)
                if rowspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "rowspan", "value": rowspan})
                    ]
                row_es.append(es)
            for c, col in enumerate(self.data.columns):
                cs = [DATA_CLASS, "row{row}".format(row=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
                formatter = self._display_funcs[(r, c)]
                value = self.data.iloc[r, c]
                row_dict = {"type": "td",
                            "value": value,
                            "class": " ".join(cs),
                            "display_value": formatter(value),
                            "is_visible": (c not in hidden_columns)}
                # only add an id if the cell has a style
                if (self.cell_ids or
                        not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                    row_dict["id"] = "_".join(cs[1:])
                row_es.append(row_dict)
                props = []
                for x in ctx[r, c]:
                    # have to handle empty styles like ['']
                    if x.count(":"):
                        props.append(x.split(":"))
                    else:
                        props.append(['', ''])
                cellstyle.append({'props': props,
                                  'selector': "row{row}_col{col}"
                                  .format(row=r, col=c)})
            body.append(row_es)
        table_attr = self.table_attributes
        # Opt out of MathJax typesetting in the notebook when disabled.
        use_mathjax = get_option("display.html.use_mathjax")
        if not use_mathjax:
            table_attr = table_attr or ''
            if 'class="' in table_attr:
                table_attr = table_attr.replace('class="',
                                                'class="tex2jax_ignore ')
            else:
                table_attr += ' class="tex2jax_ignore"'
        return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                    precision=precision, table_styles=table_styles,
                    caption=caption, table_attributes=table_attr)
    def format(self, formatter, subset=None):
        """
        Format the text display value of cells.
        .. versionadded:: 0.18.0
        Parameters
        ----------
        formatter : str, callable, or dict
        subset : IndexSlice
            An argument to ``DataFrame.loc`` that restricts which elements
            ``formatter`` is applied to.
        Returns
        -------
        self : Styler
        Notes
        -----
        ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
        ``a`` is one of
        - str: this will be wrapped in: ``a.format(x)``
        - callable: called with the value of an individual cell
        The default display value for numeric values is the "general" (``g``)
        format with ``pd.options.display.precision`` precision.
        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
        >>> df.style.format("{:.2%}")
        >>> df['c'] = ['a', 'b', 'c', 'd']
        >>> df.style.format({'c': str.upper})
        """
        if subset is None:
            row_locs = range(len(self.data))
            col_locs = range(len(self.data.columns))
        else:
            subset = _non_reducing_slice(subset)
            # A 1-tuple slice selects rows only; add all columns so
            # ``.loc`` below receives a full 2-D indexer.
            if len(subset) == 1:
                subset = subset, self.data.columns
            sub_df = self.data.loc[subset]
            row_locs = self.data.index.get_indexer_for(sub_df.index)
            col_locs = self.data.columns.get_indexer_for(sub_df.columns)
        if is_dict_like(formatter):
            for col, col_formatter in formatter.items():
                # formatter must be callable, so '{}' are converted to lambdas
                col_formatter = _maybe_wrap_formatter(col_formatter)
                col_num = self.data.columns.get_indexer_for([col])[0]
                for row_num in row_locs:
                    self._display_funcs[(row_num, col_num)] = col_formatter
        else:
            # single scalar to format all cells with
            locs = product(*(row_locs, col_locs))
            for i, j in locs:
                # NOTE(review): the formatter is re-wrapped on every
                # iteration; hoisting outside the loop looks equivalent.
                formatter = _maybe_wrap_formatter(formatter)
                self._display_funcs[(i, j)] = formatter
        return self
    def render(self, **kwargs):
        """
        Render the built up styles to HTML.
        Parameters
        ----------
        `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to provide
        additional variables for a custom template.
        .. versionadded:: 0.20
        Returns
        -------
        rendered : str
            The rendered HTML
        Notes
        -----
        ``Styler`` objects have defined the ``_repr_html_`` method
        which automatically calls ``self.render()`` when it's the
        last item in a Notebook cell. When calling ``Styler.render()``
        directly, wrap the result in ``IPython.display.HTML`` to view
        the rendered HTML in the notebook.
        Pandas uses the following keys in render. Arguments passed
        in ``**kwargs`` take precedence, so think carefully if you want
        to override them:
        * head
        * cellstyle
        * body
        * uuid
        * precision
        * table_styles
        * caption
        * table_attributes
        """
        self._compute()
        # TODO: namespace all the pandas keys
        d = self._translate()
        # filter out empty styles, every cell will have a class
        # but the list of props may just be [['', '']].
        # so we have the nested anys below
        trimmed = [x for x in d['cellstyle']
                   if any(any(y) for y in x['props'])]
        d['cellstyle'] = trimmed
        d.update(kwargs)
        return self.template.render(**d)
    def _update_ctx(self, attrs):
        """
        Update the state of the Styler.
        Collects a mapping of {index_label: ['<property>: <value>']}.
        attrs : Series or DataFrame
        should contain strings of '<property>: <value>;<prop2>: <val2>'
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
        """
        for row_label, v in attrs.iterrows():
            for col_label, col in v.iteritems():
                # Translate labels to positional (row, col) ctx keys.
                i = self.index.get_indexer([row_label])[0]
                j = self.columns.get_indexer([col_label])[0]
                for pair in col.rstrip(";").split(";"):
                    self.ctx[(i, j)].append(pair)
    def _copy(self, deepcopy=False):
        styler = Styler(self.data, precision=self.precision,
                        caption=self.caption, uuid=self.uuid,
                        table_styles=self.table_styles)
        if deepcopy:
            styler.ctx = copy.deepcopy(self.ctx)
            styler._todo = copy.deepcopy(self._todo)
        else:
            # Shallow copy shares ctx/_todo with the original.
            styler.ctx = self.ctx
            styler._todo = self._todo
        return styler
    def __copy__(self):
        """
        Shallow copy (used by ``copy.copy``): shares style state.
        """
        return self._copy(deepcopy=False)
    def __deepcopy__(self, memo):
        return self._copy(deepcopy=True)
    def clear(self):
        """
        Reset the styler, removing any previously applied styles.
        Returns None.
        """
        self.ctx.clear()
        self._todo = []
    def _compute(self):
        """
        Execute the style functions built up in `self._todo`.
        Relies on the conventions that all style functions go through
        .apply or .applymap. They append styles to apply as tuples of
        (application method, *args, **kwargs)
        """
        r = self
        for func, args, kwargs in self._todo:
            r = func(self)(*args, **kwargs)
        return r
    def _apply(self, func, axis=0, subset=None, **kwargs):
        subset = slice(None) if subset is None else subset
        subset = _non_reducing_slice(subset)
        data = self.data.loc[subset]
        if axis is not None:
            # Column- or row-wise: delegate to DataFrame.apply.
            result = data.apply(func, axis=axis,
                                result_type='expand', **kwargs)
            result.columns = data.columns
        else:
            # Table-wise: func must hand back a like-shaped DataFrame.
            result = func(data, **kwargs)
            if not isinstance(result, pd.DataFrame):
                raise TypeError(
                    "Function {func!r} must return a DataFrame when "
                    "passed to `Styler.apply` with axis=None"
                    .format(func=func))
            if not (result.index.equals(data.index) and
                    result.columns.equals(data.columns)):
                msg = ('Result of {func!r} must have identical index and '
                       'columns as the input'.format(func=func))
                raise ValueError(msg)
        result_shape = result.shape
        expected_shape = self.data.loc[subset].shape
        if result_shape != expected_shape:
            msg = ("Function {func!r} returned the wrong shape.\n"
                   "Result has shape: {res}\n"
                   "Expected shape:   {expect}".format(func=func,
                                                       res=result.shape,
                                                       expect=expected_shape))
            raise ValueError(msg)
        self._update_ctx(result)
        return self
    def apply(self, func, axis=0, subset=None, **kwargs):
        """
        Apply a function column-wise, row-wise, or table-wise,
        updating the HTML representation with the result.
        Parameters
        ----------
        func : function
            ``func`` should take a Series or DataFrame (depending
            on ``axis``), and return an object with the same shape.
            Must return a DataFrame with identical index and
            column labels when ``axis=None``
        axis : int, str or None
            apply to each column (``axis=0`` or ``'index'``)
            or to each row (``axis=1`` or ``'columns'``) or
            to the entire DataFrame at once with ``axis=None``
        subset : IndexSlice
            a valid indexer to limit ``data`` to *before* applying the
            function. Consider using a pandas.IndexSlice
        kwargs : dict
            pass along to ``func``
        Returns
        -------
        self : Styler
        Notes
        -----
        The output shape of ``func`` should match the input, i.e. if
        ``x`` is the input row, column, or table (depending on ``axis``),
        then ``func(x).shape == x.shape`` should be true.
        This is similar to ``DataFrame.apply``, except that ``axis=None``
        applies the function to the entire DataFrame at once,
        rather than column-wise or row-wise.
        Examples
        --------
        >>> def highlight_max(x):
        ...     return ['background-color: yellow' if v == x.max() else ''
                        for v in x]
        ...
        >>> df = pd.DataFrame(np.random.randn(5, 2))
        >>> df.style.apply(highlight_max)
        """
        # Defer execution: the actual work happens in ``_compute``.
        self._todo.append((lambda instance: getattr(instance, '_apply'),
                           (func, axis, subset), kwargs))
        return self
    def _applymap(self, func, subset=None, **kwargs):
        func = partial(func, **kwargs)  # applymap doesn't take kwargs?
        if subset is None:
            subset = pd.IndexSlice[:]
        subset = _non_reducing_slice(subset)
        result = self.data.loc[subset].applymap(func)
        self._update_ctx(result)
        return self
    def applymap(self, func, subset=None, **kwargs):
        """
        Apply a function elementwise, updating the HTML
        representation with the result.
        Parameters
        ----------
        func : function
            ``func`` should take a scalar and return a scalar
        subset : IndexSlice
            a valid indexer to limit ``data`` to *before* applying the
            function. Consider using a pandas.IndexSlice
        kwargs : dict
            pass along to ``func``
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.where
        """
        # Deferred like ``apply``; executed later by ``_compute``.
        self._todo.append((lambda instance: getattr(instance, '_applymap'),
                           (func, subset), kwargs))
        return self
    def where(self, cond, value, other=None, subset=None, **kwargs):
        """
        Apply a function elementwise, updating the HTML
        representation with a style which is selected in
        accordance with the return value of a function.
        .. versionadded:: 0.21.0
        Parameters
        ----------
        cond : callable
            ``cond`` should take a scalar and return a boolean
        value : str
            applied when ``cond`` returns true
        other : str
            applied when ``cond`` returns false
        subset : IndexSlice
            a valid indexer to limit ``data`` to *before* applying the
            function. Consider using a pandas.IndexSlice
        kwargs : dict
            pass along to ``cond``
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.applymap
        """
        if other is None:
            other = ''
        return self.applymap(lambda val: value if cond(val) else other,
                             subset=subset, **kwargs)
    def set_precision(self, precision):
        """
        Set the precision used to render.
        Parameters
        ----------
        precision : int
        Returns
        -------
        self : Styler
        """
        self.precision = precision
        return self
    def set_table_attributes(self, attributes):
        """
        Set the table attributes.
        These are the items that show up in the opening ``<table>`` tag
        in addition to the automatic (by default) id.
        Parameters
        ----------
        attributes : string
        Returns
        -------
        self : Styler
        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4))
        >>> df.style.set_table_attributes('class="pure-table"')
        # ... <table class="pure-table"> ...
        """
        self.table_attributes = attributes
        return self
    def export(self):
        """
        Export the styles applied to the current Styler.
        Can be applied to a second style with ``Styler.use``.
        Returns
        -------
        styles : list
        See Also
        --------
        Styler.use
        """
        return self._todo
    def use(self, styles):
        """
        Set the styles on the current Styler, possibly using styles
        from ``Styler.export``.
        Parameters
        ----------
        styles : list
            list of style functions
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.export
        """
        self._todo.extend(styles)
        return self
    def set_uuid(self, uuid):
        """
        Set the uuid for a Styler.
        Parameters
        ----------
        uuid : str
        Returns
        -------
        self : Styler
        """
        self.uuid = uuid
        return self
    def set_caption(self, caption):
        """
        Set the caption on a Styler
        Parameters
        ----------
        caption : str
        Returns
        -------
        self : Styler
        """
        self.caption = caption
        return self
    def set_table_styles(self, table_styles):
        """
        Set the table styles on a Styler.
        These are placed in a ``<style>`` tag before the generated HTML table.
        Parameters
        ----------
        table_styles : list
            Each individual table_style should be a dictionary with
            ``selector`` and ``props`` keys. ``selector`` should be a CSS
            selector that the style will be applied to (automatically
            prefixed by the table's UUID) and ``props`` should be a list of
            tuples with ``(attribute, value)``.
        Returns
        -------
        self : Styler
        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4))
        >>> df.style.set_table_styles(
        ...     [{'selector': 'tr:hover',
        ...       'props': [('background-color', 'yellow')]}]
        ... )
        """
        self.table_styles = table_styles
        return self
    def hide_index(self):
        """
        Hide any indices from rendering.
        .. versionadded:: 0.23.0
        Returns
        -------
        self : Styler
        """
        self.hidden_index = True
        return self
    def hide_columns(self, subset):
        """
        Hide columns from rendering.
        .. versionadded:: 0.23.0
        Parameters
        ----------
        subset : IndexSlice
            An argument to ``DataFrame.loc`` that identifies which columns
            are hidden.
        Returns
        -------
        self : Styler
        """
        subset = _non_reducing_slice(subset)
        hidden_df = self.data.loc[subset]
        self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
        return self
    # -----------------------------------------------------------------------
    # A collection of "builtin" styles
    # -----------------------------------------------------------------------
    @staticmethod
    def _highlight_null(v, null_color):
        return ('background-color: {color}'.format(color=null_color)
                if pd.isna(v) else '')
    def highlight_null(self, null_color='red'):
        """
        Shade the background ``null_color`` for missing values.
        Parameters
        ----------
        null_color : str
        Returns
        -------
        self : Styler
        """
        self.applymap(self._highlight_null, null_color=null_color)
        return self
    def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                            subset=None, text_color_threshold=0.408):
        """
        Color the background in a gradient according to
        the data in each column (optionally row).
        Requires matplotlib.
        Parameters
        ----------
        cmap : str or colormap
            matplotlib colormap
        low, high : float
            compress the range by these values.
        axis : int or str
            1 or 'columns' for columnwise, 0 or 'index' for rowwise
        subset : IndexSlice
            a valid slice for ``data`` to limit the style application to
        text_color_threshold : float or int
            luminance threshold for determining text color. Facilitates text
            visibility across varying background colors. From 0 to 1.
            0 = all text is dark colored, 1 = all text is light colored.
            .. versionadded:: 0.24.0
        Returns
        -------
        self : Styler
        Raises
        ------
        ValueError
            If ``text_color_threshold`` is not a value from 0 to 1.
        Notes
        -----
        Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
        text legible by not using the entire range of the color map. The range
        of the data is extended by ``low * (x.max() - x.min())`` and ``high *
        (x.max() - x.min())`` before normalizing.
        """
        subset = _maybe_numeric_slice(self.data, subset)
        subset = _non_reducing_slice(subset)
        self.apply(self._background_gradient, cmap=cmap, subset=subset,
                   axis=axis, low=low, high=high,
                   text_color_threshold=text_color_threshold)
        return self
    @staticmethod
    def _background_gradient(s, cmap='PuBu', low=0, high=0,
                             text_color_threshold=0.408):
        """
        Color background in a range according to the data.
        """
        if (not isinstance(text_color_threshold, (float, int)) or
                not 0 <= text_color_threshold <= 1):
            msg = "`text_color_threshold` must be a value from 0 to 1."
            raise ValueError(msg)
        with _mpl(Styler.background_gradient) as (plt, colors):
            smin = s.values.min()
            smax = s.values.max()
            rng = smax - smin
            # extend lower / upper bounds, compresses color range
            norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
            # matplotlib colors.Normalize modifies inplace?
            # https://github.com/matplotlib/matplotlib/issues/5427
            rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
            def relative_luminance(rgba):
                """
                Calculate relative luminance of a color.
                The calculation adheres to the W3C standards
                (https://www.w3.org/WAI/GL/wiki/Relative_luminance)
                Parameters
                ----------
                color : rgb or rgba tuple
                Returns
                -------
                float
                    The relative luminance as a value from 0 to 1
                """
                # NOTE(review): the W3C formula is ((x + 0.055) / 1.055) ** 2.4;
                # as written, operator precedence makes this divide by
                # 1.055 ** 2.4 instead — verify against upstream pandas.
                r, g, b = (
                    x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)
                    for x in rgba[:3]
                )
                return 0.2126 * r + 0.7152 * g + 0.0722 * b
            def css(rgba):
                # Pick light or dark text for contrast with the background.
                dark = relative_luminance(rgba) < text_color_threshold
                text_color = '#f1f1f1' if dark else '#000000'
                return 'background-color: {b};color: {c};'.format(
                    b=colors.rgb2hex(rgba), c=text_color
                )
            if s.ndim == 1:
                return [css(rgba) for rgba in rgbas]
            else:
                return pd.DataFrame(
                    [[css(rgba) for rgba in row] for row in rgbas],
                    index=s.index, columns=s.columns
                )
    def set_properties(self, subset=None, **kwargs):
        """
        Convenience method for setting one or more non-data dependent
        properties for each cell.
        Parameters
        ----------
        subset : IndexSlice
            a valid slice for ``data`` to limit the style application to
        kwargs : dict
            property: value pairs to be set for each cell
        Returns
        -------
        self : Styler
        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4))
        >>> df.style.set_properties(color="white", align="right")
        >>> df.style.set_properties(**{'background-color': 'yellow'})
        """
        values = ';'.join('{p}: {v}'.format(p=p, v=v)
                          for p, v in kwargs.items())
        f = lambda x: values
        return self.applymap(f, subset=subset)
    @staticmethod
    def _bar(s, align, colors, width=100, vmin=None, vmax=None):
        """
        Draw bar chart in dataframe cells.
        """
        # Get input value range.
        smin = s.min() if vmin is None else vmin
        if isinstance(smin, ABCSeries):
            smin = smin.min()
        smax = s.max() if vmax is None else vmax
        if isinstance(smax, ABCSeries):
            smax = smax.max()
        if align == 'mid':
            smin = min(0, smin)
            smax = max(0, smax)
        elif align == 'zero':
            # For "zero" mode, we want the range to be symmetrical around zero.
            smax = max(abs(smin), abs(smax))
            smin = -smax
        # Transform to percent-range of linear-gradient
        # (+ 1e-12 guards against division by zero for constant data).
        normed = width * (s.values - smin) / (smax - smin + 1e-12)
        # ``zero`` is the percent position of the value 0 within the range.
        zero = -width * smin / (smax - smin + 1e-12)
        def css_bar(start, end, color):
            """
            Generate CSS code to draw a bar from start to end.
            """
            css = 'width: 10em; height: 80%;'
            if end > start:
                css += 'background: linear-gradient(90deg,'
                if start > 0:
                    css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                        s=start, c=color
                    )
                css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                    e=min(end, width), c=color,
                )
            return css
        def css(x):
            if pd.isna(x):
                return ''
            # avoid deprecated indexing `colors[x > zero]`
            color = colors[1] if x > zero else colors[0]
            if align == 'left':
                return css_bar(0, x, color)
            else:
                return css_bar(min(x, zero), max(x, zero), color)
        if s.ndim == 1:
            return [css(x) for x in normed]
        else:
            return pd.DataFrame(
                [[css(x) for x in row] for row in normed],
                index=s.index, columns=s.columns
            )
    def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
            align='left', vmin=None, vmax=None):
        """
        Draw bar chart in the cell backgrounds.
        Parameters
        ----------
        subset : IndexSlice, optional
            A valid slice for `data` to limit the style application to.
        axis : int, str or None, default 0
            Apply to each column (`axis=0` or `'index'`)
            or to each row (`axis=1` or `'columns'`) or
            to the entire DataFrame at once with `axis=None`.
        color : str or 2-tuple/list
            If a str is passed, the color is the same for both
            negative and positive numbers. If 2-tuple/list is used, the
            first element is the color_negative and the second is the
            color_positive (eg: ['#d65f5f', '#5fba7d']).
        width : float, default 100
            A number between 0 or 100. The largest value will cover `width`
            percent of the cell's width.
        align : {'left', 'zero',' mid'}, default 'left'
            How to align the bars with the cells.
            - 'left' : the min value starts at the left of the cell.
            - 'zero' : a value of zero is located at the center of the cell.
            - 'mid' : the center of the cell is at (max-min)/2, or
              if values are all negative (positive) the zero is aligned
              at the right (left) of the cell.
            .. versionadded:: 0.20.0
        vmin : float, optional
            Minimum bar value, defining the left hand limit
            of the bar drawing range, lower values are clipped to `vmin`.
            When None (default): the minimum value of the data will be used.
            .. versionadded:: 0.24.0
        vmax : float, optional
            Maximum bar value, defining the right hand limit
            of the bar drawing range, higher values are clipped to `vmax`.
            When None (default): the maximum value of the data will be used.
            .. versionadded:: 0.24.0
        Returns
        -------
        self : Styler
        """
        if align not in ('left', 'zero', 'mid'):
            raise ValueError("`align` must be one of {'left', 'zero',' mid'}")
        # Normalize ``color`` to a [negative, positive] pair.
        if not (is_list_like(color)):
            color = [color, color]
        elif len(color) == 1:
            color = [color[0], color[0]]
        elif len(color) > 2:
            raise ValueError("`color` must be string or a list-like"
                             " of length 2: [`color_neg`, `color_pos`]"
                             " (eg: color=['#d65f5f', '#5fba7d'])")
        subset = _maybe_numeric_slice(self.data, subset)
        subset = _non_reducing_slice(subset)
        self.apply(self._bar, subset=subset, axis=axis,
                   align=align, colors=color, width=width,
                   vmin=vmin, vmax=vmax)
        return self
    def highlight_max(self, subset=None, color='yellow', axis=0):
        """
        Highlight the maximum by shading the background.
        Parameters
        ----------
        subset : IndexSlice, default None
            a valid slice for ``data`` to limit the style application to
        color : str, default 'yellow'
        axis : int, str, or None; default 0
            0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
            or ``None`` for tablewise
        Returns
        -------
        self : Styler
        """
        return self._highlight_handler(subset=subset, color=color, axis=axis,
                                       max_=True)
    def highlight_min(self, subset=None, color='yellow', axis=0):
        """
        Highlight the minimum by shading the background.
        Parameters
        ----------
        subset : IndexSlice, default None
            a valid slice for ``data`` to limit the style application to
        color : str, default 'yellow'
        axis : int, str, or None; default 0
            0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
            or ``None`` for tablewise
        Returns
        -------
        self : Styler
        """
        return self._highlight_handler(subset=subset, color=color, axis=axis,
                                       max_=False)
    def _highlight_handler(self, subset=None, color='yellow', axis=None,
                           max_=True):
        # Shared implementation of highlight_max / highlight_min.
        subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
        self.apply(self._highlight_extrema, color=color, axis=axis,
                   subset=subset, max_=max_)
        return self
    @staticmethod
    def _highlight_extrema(data, color='yellow', max_=True):
        """
        Highlight the min or max in a Series or DataFrame.
        """
        attr = 'background-color: {0}'.format(color)
        if data.ndim == 1:  # Series from .apply
            if max_:
                extrema = data == data.max()
            else:
                extrema = data == data.min()
            return [attr if v else '' for v in extrema]
        else:  # DataFrame from .apply(axis=None)
            if max_:
                extrema = data == data.max().max()
            else:
                extrema = data == data.min().min()
            return pd.DataFrame(np.where(extrema, attr, ''),
                                index=data.index, columns=data.columns)
    @classmethod
    def from_custom_template(cls, searchpath, name):
        """
        Factory function for creating a subclass of ``Styler``
        with a custom template and Jinja environment.
        Parameters
        ----------
        searchpath : str or list
            Path or paths of directories containing the templates
        name : str
            Name of your custom template to use for rendering
        Returns
        -------
        MyStyler : subclass of Styler
            Has the correct ``env`` and ``template`` class attributes set.
        """
        # Custom templates win; fall back to the packaged pandas templates.
        loader = ChoiceLoader([
            FileSystemLoader(searchpath),
            cls.loader,
        ])
        class MyStyler(cls):
            env = Environment(loader=loader)
            template = env.get_template(name)
        return MyStyler
    def pipe(self, func, *args, **kwargs):
        """
        Apply ``func(self, *args, **kwargs)``, and return the result.
        .. versionadded:: 0.24.0
        Parameters
        ----------
        func : function
            Function to apply to the Styler.  Alternatively, a
            ``(callable, keyword)`` tuple where ``keyword`` is a string
            indicating the keyword of ``callable`` that expects the Styler.
        *args, **kwargs :
            Arguments passed to `func`.
        Returns
        -------
        object :
            The value returned by ``func``.
        See Also
        --------
        DataFrame.pipe : Analogous method for DataFrame.
        Styler.apply : Apply a function row-wise, column-wise, or table-wise to
            modify the dataframe's styling.
        Notes
        -----
        Like :meth:`DataFrame.pipe`, this method can simplify the
        application of several user-defined functions to a styler.  Instead
        of writing:
        .. code-block:: python
            f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)
        users can write:
        .. code-block:: python
            (df.style.set_precision(3)
               .pipe(g, arg1=a)
               .pipe(f, arg2=b, arg3=c))
        In particular, this allows users to define functions that take a
        styler object, along with other parameters, and return the styler after
        making styling changes (such as calling :meth:`Styler.apply` or
        :meth:`Styler.set_properties`).  Using ``.pipe``, these user-defined
        style "transformations" can be interleaved with calls to the built-in
        Styler interface.
        Examples
        --------
        >>> def format_conversion(styler):
        ...     return (styler.set_properties(**{'text-align': 'right'})
        ...                   .format({'conversion': '{:.1%}'}))
        The user-defined ``format_conversion`` function above can be called
        within a sequence of other style modifications:
        >>> df = pd.DataFrame({'trial': list(range(5)),
        ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
        >>> (df.style
        ...    .highlight_min(subset=['conversion'], color='yellow')
        ...    .pipe(format_conversion)
        ...    .set_caption("Results with minimum conversion highlighted."))
        """
        return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
"""
Given an index, find the level length for each element.
Optional argument is a list of index positions which
should not be visible.
Result is a dictionary of (level, inital_position): span
"""
sentinel = object()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if hidden_elements is None:
hidden_elements = []
lengths = {}
if index.nlevels == 1:
for i, value in enumerate(levels):
if(i not in hidden_elements):
lengths[(0, i)] = 1
return lengths
for i, lvl in enumerate(levels):
for j, row in enumerate(lvl):
if not get_option('display.multi_sparse'):
lengths[(i, j)] = 1
elif (row != sentinel) and (j not in hidden_elements):
last_label = j
lengths[(i, last_label)] = 1
elif (row != sentinel):
# even if its hidden, keep track of it in case
# length >1 and later elements are visible
last_label = j
lengths[(i, last_label)] = 0
elif(j not in hidden_elements):
lengths[(i, last_label)] += 1
non_zero_lengths = {
element: length for element, length in lengths.items() if length >= 1}
return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """
    Normalize ``formatter`` into a callable.

    A template string is wrapped so it is applied via ``str.format``;
    a callable is returned unchanged; anything else raises TypeError.
    """
    if is_string_like(formatter):
        return lambda x: formatter.format(x)
    if callable(formatter):
        return formatter
    raise TypeError("Expected a template string or callable, got {formatter} "
                    "instead".format(formatter=formatter))
|
render
|
Render the built up styles to HTML.
Parameters
----------
`**kwargs` : Any additional keyword arguments are passed through
to ``self.template.render``. This is useful when you need to provide
additional variables for a custom template.
.. versionadded:: 0.20
Returns
-------
rendered : str
The rendered HTML
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
# jinja2 is a hard requirement for the styling machinery: the HTML is
# produced by rendering a Jinja template, so fail loudly at import time.
try:
    from jinja2 import (
        PackageLoader, Environment, ChoiceLoader, FileSystemLoader
    )
except ImportError:
    raise ImportError("pandas.Styler requires jinja2. "
                      "Please install with `conda install Jinja2`\n"
                      "or `pip install Jinja2`")
# matplotlib is optional: only colormap-based styles (background_gradient)
# need it, so record availability instead of failing here.
try:
    import matplotlib.pyplot as plt
    from matplotlib import colors
    has_mpl = True
except ImportError:
    has_mpl = False
    # Formatted with the name of the feature that needed matplotlib.
    no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    """
    Yield ``(plt, colors)`` when matplotlib is installed; otherwise
    raise an ImportError naming the feature (``func``) that needs it.
    """
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, colors
class Styler(object):
    """
    Helps style a DataFrame or Series according to the data with HTML and CSS.

    Parameters
    ----------
    data : Series or DataFrame
    precision : int
        precision to round floats to, defaults to pd.options.display.precision
    table_styles : list-like, default None
        list of {selector: (attr, value)} dicts; see Notes
    uuid : str, default None
        a unique identifier to avoid CSS collisions; generated automatically
    caption : str, default None
        caption to attach to the table
    cell_ids : bool, default True
        If True, each cell will have an ``id`` attribute in their HTML tag.
        The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
        where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
        number and ``<num_col>`` is the column number.

    Attributes
    ----------
    env : Jinja2 Environment
    template : Jinja2 Template
    loader : Jinja2 Loader

    See Also
    --------
    pandas.DataFrame.style

    Notes
    -----
    Most styling will be done by passing style functions into
    ``Styler.apply`` or ``Styler.applymap``. Style functions should
    return values with strings containing CSS ``'attr: value'`` that will
    be applied to the indicated cells.

    If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
    to automatically render itself. Otherwise call Styler.render to get
    the generated HTML.

    CSS classes are attached to the generated HTML

    * Index and Column names include ``index_name`` and ``level<k>``
      where `k` is its level in a MultiIndex
    * Index label cells include

      * ``row_heading``
      * ``row<n>`` where `n` is the numeric position of the row
      * ``level<k>`` where `k` is the level in a MultiIndex

    * Column label cells include

      * ``col_heading``
      * ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex

    * Blank cells include ``blank``
    * Data cells include ``data``
    """
    # Jinja2 machinery: templates ship inside the pandas package.
    # Subclasses created via ``from_custom_template`` override these.
    loader = PackageLoader("pandas", "io/formats/templates")
    env = Environment(
        loader=loader,
        trim_blocks=True,
    )
    template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
             caption=None, table_attributes=None, cell_ids=True):
    # ctx maps (row, col) integer positions to the list of CSS
    # 'attr: value' strings accumulated for that cell.
    self.ctx = defaultdict(list)
    # _todo queues (method-getter, args, kwargs) tuples appended by
    # .apply/.applymap; they run lazily via _compute() at render time.
    self._todo = []
    if not isinstance(data, (pd.Series, pd.DataFrame)):
        raise TypeError("``data`` must be a Series or DataFrame")
    if data.ndim == 1:
        # normalize: styling always works on a 2-D frame internally
        data = data.to_frame()
    if not data.index.is_unique or not data.columns.is_unique:
        raise ValueError("style is not supported for non-unique indices.")
    self.data = data
    self.index = data.index
    self.columns = data.columns
    self.uuid = uuid
    self.table_styles = table_styles
    self.caption = caption
    if precision is None:
        precision = get_option('display.precision')
    self.precision = precision
    self.table_attributes = table_attributes
    self.hidden_index = False
    self.hidden_columns = []
    self.cell_ids = cell_ids
    # display_funcs maps (row, col) -> formatting function

    def default_display_func(x):
        # floats honor the configured precision; everything else is
        # passed through unchanged
        if is_float(x):
            return '{:>.{precision}g}'.format(x, precision=self.precision)
        else:
            return x
    self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
    axes='index, columns', klass='Styler',
    axes_single_arg="{0 or 'index', 1 or 'columns'}",
    optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
    versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
             float_format=None, columns=None, header=True, index=True,
             index_label=None, startrow=0, startcol=0, engine=None,
             merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
             freeze_panes=None):
    # Delegate to ExcelFormatter, which knows how to carry the Styler's
    # computed CSS over into spreadsheet cell styles.
    from pandas.io.formats.excel import ExcelFormatter
    formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                               header=header,
                               float_format=float_format, index=index,
                               index_label=index_label,
                               merge_cells=merge_cells,
                               inf_rep=inf_rep)
    # NOTE(review): several accepted parameters (encoding, verbose) are
    # not forwarded here -- presumably kept for signature parity with
    # DataFrame.to_excel; confirm against that API.
    formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                    startcol=startcol, freeze_panes=freeze_panes,
                    engine=engine)
def _translate(self):
    """
    Convert the DataFrame in `self.data` and the attrs from `_build_styles`
    into a dictionary of {head, body, uuid, cellstyle} consumed by the
    Jinja template.
    """
    table_styles = self.table_styles or []
    caption = self.caption
    ctx = self.ctx
    precision = self.precision
    hidden_index = self.hidden_index
    hidden_columns = self.hidden_columns
    # '-' is not valid in CSS identifiers, so sanitize generated uuids
    uuid = self.uuid or str(uuid1()).replace("-", "_")
    ROW_HEADING_CLASS = "row_heading"
    COL_HEADING_CLASS = "col_heading"
    INDEX_NAME_CLASS = "index_name"
    DATA_CLASS = "data"
    BLANK_CLASS = "blank"
    BLANK_VALUE = ""

    def format_attr(pair):
        # {'key': 'colspan', 'value': 2} -> 'colspan=2'
        return "{key}={value}".format(**pair)

    # for sparsifying a MultiIndex
    idx_lengths = _get_level_lengths(self.index)
    col_lengths = _get_level_lengths(self.columns, hidden_columns)
    cell_context = dict()
    n_rlvls = self.data.index.nlevels
    n_clvls = self.data.columns.nlevels
    rlabels = self.data.index.tolist()
    clabels = self.data.columns.tolist()
    # normalize flat indexes to one-element label lists per position
    if n_rlvls == 1:
        rlabels = [[x] for x in rlabels]
    if n_clvls == 1:
        clabels = [[x] for x in clabels]
    # transpose: clabels[level][position]
    clabels = list(zip(*clabels))
    cellstyle = []
    head = []
    # one <tr> of header cells per column level
    for r in range(n_clvls):
        # Blank for Index columns...
        row_es = [{"type": "th",
                   "value": BLANK_VALUE,
                   "display_value": BLANK_VALUE,
                   "is_visible": not hidden_index,
                   "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
        # ... except maybe the last for columns.names
        name = self.data.columns.names[r]
        cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
              "level{lvl}".format(lvl=r)]
        name = BLANK_VALUE if name is None else name
        row_es.append({"type": "th",
                       "value": name,
                       "display_value": name,
                       "class": " ".join(cs),
                       "is_visible": not hidden_index})
        if clabels:
            for c, value in enumerate(clabels[r]):
                cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get(
                    "col_headings", {}).get(r, {}).get(c, []))
                es = {
                    "type": "th",
                    "value": value,
                    "display_value": value,
                    "class": " ".join(cs),
                    "is_visible": _is_visible(c, r, col_lengths),
                }
                colspan = col_lengths.get((r, c), 0)
                if colspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "colspan", "value": colspan})
                    ]
                row_es.append(es)
            head.append(row_es)
    # extra header row carrying the index names, when any are set
    if (self.data.index.names and
            com._any_not_none(*self.data.index.names) and
            not hidden_index):
        index_header_row = []
        for c, name in enumerate(self.data.index.names):
            cs = [INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=c)]
            name = '' if name is None else name
            index_header_row.append({"type": "th", "value": name,
                                     "class": " ".join(cs)})
        # pad with blanks under the (visible) data columns
        index_header_row.extend(
            [{"type": "th",
              "value": BLANK_VALUE,
              "class": " ".join([BLANK_CLASS])
              }] * (len(clabels[0]) - len(hidden_columns)))
        head.append(index_header_row)
    body = []
    for r, idx in enumerate(self.data.index):
        row_es = []
        # row-heading <th> cells, one per index level
        for c, value in enumerate(rlabels[r]):
            rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                   "row{row}".format(row=r)]
            es = {
                "type": "th",
                "is_visible": (_is_visible(r, c, idx_lengths) and
                               not hidden_index),
                "value": value,
                "display_value": value,
                "id": "_".join(rid[1:]),
                "class": " ".join(rid)
            }
            rowspan = idx_lengths.get((c, r), 0)
            if rowspan > 1:
                es["attributes"] = [
                    format_attr({"key": "rowspan", "value": rowspan})
                ]
            row_es.append(es)
        # data <td> cells
        for c, col in enumerate(self.data.columns):
            cs = [DATA_CLASS, "row{row}".format(row=r),
                  "col{col}".format(col=c)]
            cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
            formatter = self._display_funcs[(r, c)]
            value = self.data.iloc[r, c]
            row_dict = {"type": "td",
                        "value": value,
                        "class": " ".join(cs),
                        "display_value": formatter(value),
                        "is_visible": (c not in hidden_columns)}
            # only add an id if the cell has a style
            if (self.cell_ids or
                    not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                row_dict["id"] = "_".join(cs[1:])
            row_es.append(row_dict)
            props = []
            for x in ctx[r, c]:
                # have to handle empty styles like ['']
                if x.count(":"):
                    props.append(x.split(":"))
                else:
                    props.append(['', ''])
            cellstyle.append({'props': props,
                              'selector': "row{row}_col{col}"
                              .format(row=r, col=c)})
        body.append(row_es)
    table_attr = self.table_attributes
    use_mathjax = get_option("display.html.use_mathjax")
    if not use_mathjax:
        # tell MathJax to leave this table alone
        table_attr = table_attr or ''
        if 'class="' in table_attr:
            table_attr = table_attr.replace('class="',
                                            'class="tex2jax_ignore ')
        else:
            table_attr += ' class="tex2jax_ignore"'
    return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                precision=precision, table_styles=table_styles,
                caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
    """
    Format the text display value of cells.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    formatter : str, callable, or dict
        A format string (applied via ``str.format``), a callable that
        maps a cell value to its display string, or a dict of
        ``{column name: str or callable}``.
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that restricts which elements
        ``formatter`` is applied to.

    Returns
    -------
    self : Styler

    Notes
    -----
    ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
    ``a`` is one of

    - str: this will be wrapped in: ``a.format(x)``
    - callable: called with the value of an individual cell

    The default display value for numeric values is the "general" (``g``)
    format with ``pd.options.display.precision`` precision.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    >>> df.style.format("{:.2%}")
    >>> df['c'] = ['a', 'b', 'c', 'd']
    >>> df.style.format({'c': str.upper})
    """
    if subset is None:
        row_locs = range(len(self.data))
        col_locs = range(len(self.data.columns))
    else:
        subset = _non_reducing_slice(subset)
        if len(subset) == 1:
            # a 1-d subset selects rows; take all columns
            subset = subset, self.data.columns
        sub_df = self.data.loc[subset]
        row_locs = self.data.index.get_indexer_for(sub_df.index)
        col_locs = self.data.columns.get_indexer_for(sub_df.columns)

    if is_dict_like(formatter):
        # per-column formatters, applied down each named column
        for col, col_formatter in formatter.items():
            # formatter must be callable, so '{}' are converted to lambdas
            col_formatter = _maybe_wrap_formatter(col_formatter)
            col_num = self.data.columns.get_indexer_for([col])[0]
            for row_num in row_locs:
                self._display_funcs[(row_num, col_num)] = col_formatter
    else:
        # single scalar to format all cells with.
        # BUGFIX/perf: wrap once up front; previously the wrapper was
        # recomputed on every iteration of the per-cell loop.
        formatter = _maybe_wrap_formatter(formatter)
        for i, j in product(row_locs, col_locs):
            self._display_funcs[(i, j)] = formatter
    return self
def render(self, **kwargs):
    """
    Render the built up styles to HTML.

    Parameters
    ----------
    `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to
        provide additional variables for a custom template.

        .. versionadded:: 0.20

    Returns
    -------
    rendered : str
        The rendered HTML

    Notes
    -----
    ``Styler`` objects have defined the ``_repr_html_`` method
    which automatically calls ``self.render()`` when it's the
    last item in a Notebook cell. When calling ``Styler.render()``
    directly, wrap the result in ``IPython.display.HTML`` to view
    the rendered HTML in the notebook.

    Pandas uses the following keys in render. Arguments passed
    in ``**kwargs`` take precedence, so think carefully if you want
    to override them:

    * head
    * cellstyle
    * body
    * uuid
    * precision
    * table_styles
    * caption
    * table_attributes
    """
    # Run any queued style functions, then translate the frame plus
    # collected styles into the template context.
    self._compute()
    d = self._translate()
    # Filter out empty styles: every cell gets a class, but the props
    # list may just be [['', '']], which would render useless CSS.
    trimmed = [x for x in d['cellstyle']
               if any(any(y) for y in x['props'])]
    d['cellstyle'] = trimmed
    d.update(kwargs)
    return self.template.render(**d)
def _update_ctx(self, attrs):
    """
    Merge freshly computed styles into ``self.ctx``.

    ``attrs`` is a Series or DataFrame whose cells hold strings of
    '<property>: <value>;<prop2>: <val2>'.  Each cell's declarations
    are split on ';' (a trailing ';' is harmless) and appended to the
    ctx entry for the corresponding (row, col) integer position.
    """
    for row_label, row in attrs.iterrows():
        for col_label, css in row.iteritems():
            i = self.index.get_indexer([row_label])[0]
            j = self.columns.get_indexer([col_label])[0]
            for declaration in css.rstrip(";").split(";"):
                self.ctx[(i, j)].append(declaration)
def _copy(self, deepcopy=False):
    """
    Build a new Styler over the same data; ``deepcopy`` controls
    whether ctx/_todo are duplicated or shared with the original.
    """
    dup = copy.deepcopy if deepcopy else (lambda obj: obj)
    styler = Styler(self.data, precision=self.precision,
                    caption=self.caption, uuid=self.uuid,
                    table_styles=self.table_styles)
    styler.ctx = dup(self.ctx)
    styler._todo = dup(self._todo)
    return styler
def __copy__(self):
    """
    Shallow copy: ctx and _todo are shared with the original.

    (The previous docstring claimed "deep copy by default", which
    contradicted the ``deepcopy=False`` call below.)
    """
    return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
    """Deep copy: ctx and _todo are duplicated with ``copy.deepcopy``."""
    return self._copy(deepcopy=True)
def clear(self):
    """
    Reset the styler, removing any previously applied styles.

    Returns None.
    """
    self._todo = []
    self.ctx.clear()
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .applymap. The append styles to apply as tuples of
(application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
    """
    Run ``func`` over (a subset of) the data and merge the resulting
    CSS strings into ``self.ctx``.  Invoked at render time by _compute.
    """
    subset = slice(None) if subset is None else subset
    subset = _non_reducing_slice(subset)
    data = self.data.loc[subset]
    if axis is not None:
        # column-/row-wise: expand so the result lines up with ``data``
        result = data.apply(func, axis=axis,
                            result_type='expand', **kwargs)
        result.columns = data.columns
    else:
        # table-wise: func must hand back a DataFrame of CSS strings
        # carrying exactly the same labels as its input
        result = func(data, **kwargs)
        if not isinstance(result, pd.DataFrame):
            raise TypeError(
                "Function {func!r} must return a DataFrame when "
                "passed to `Styler.apply` with axis=None"
                .format(func=func))
        if not (result.index.equals(data.index) and
                result.columns.equals(data.columns)):
            msg = ('Result of {func!r} must have identical index and '
                   'columns as the input'.format(func=func))
            raise ValueError(msg)
    # shape check applies on every axis setting
    result_shape = result.shape
    expected_shape = self.data.loc[subset].shape
    if result_shape != expected_shape:
        msg = ("Function {func!r} returned the wrong shape.\n"
               "Result has shape: {res}\n"
               "Expected shape: {expect}".format(func=func,
                                                 res=result.shape,
                                                 expect=expected_shape))
        raise ValueError(msg)
    self._update_ctx(result)
    return self
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a function column-wise, row-wise, or table-wise, updating
    the HTML representation with the result.

    Parameters
    ----------
    func : function
        Takes a Series or DataFrame (depending on ``axis``) and returns
        an object of the same shape; with ``axis=None`` it must return
        a DataFrame with identical index and column labels.
    axis : int, str or None
        apply to each column (``axis=0`` or ``'index'``), each row
        (``axis=1`` or ``'columns'``), or the entire DataFrame at once
        with ``axis=None``.
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    Notes
    -----
    The output shape of ``func`` should match the input, i.e. if ``x``
    is the input row, column, or table (depending on ``axis``), then
    ``func(x).shape == x.shape`` should be true.

    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
                    for v in x]
    ...
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    # Defer execution: store a getter for the bound _apply method so
    # the work happens at render time against the live instance.
    entry = (lambda instance: getattr(instance, '_apply'),
             (func, axis, subset), kwargs)
    self._todo.append(entry)
    return self
def _applymap(self, func, subset=None, **kwargs):
    """Immediate elementwise application; records styles in ctx."""
    func = partial(func, **kwargs)  # applymap doesn't take kwargs?
    subset = pd.IndexSlice[:] if subset is None else subset
    subset = _non_reducing_slice(subset)
    self._update_ctx(self.data.loc[subset].applymap(func))
    return self
def applymap(self, func, subset=None, **kwargs):
    """
    Apply ``func`` elementwise, updating the HTML representation.

    Parameters
    ----------
    func : function
        takes a scalar, returns a scalar (a CSS string).
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    # Queue the bound _applymap call for execution at render time.
    entry = (lambda instance: getattr(instance, '_applymap'),
             (func, subset), kwargs)
    self._todo.append(entry)
    return self
def where(self, cond, value, other=None, subset=None, **kwargs):
    """
    Apply a style elementwise depending on a condition.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    cond : callable
        ``cond`` should take a scalar (plus any ``**kwargs``) and
        return a boolean
    value : str
        applied when ``cond`` returns true
    other : str
        applied when ``cond`` returns false (defaults to '', no style)
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``cond``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap
    """
    if other is None:
        other = ''
    # BUGFIX: kwargs are documented as going to ``cond``; previously they
    # were forwarded to applymap, where _applymap partial-bound them onto
    # the one-argument lambda below and raised TypeError for any kwargs.
    return self.applymap(
        lambda val: value if cond(val, **kwargs) else other,
        subset=subset)
def set_precision(self, precision):
    """
    Set the precision used when rendering float values.

    Parameters
    ----------
    precision : int

    Returns
    -------
    self : Styler
    """
    self.precision = precision
    return self
def set_table_attributes(self, attributes):
    """
    Set extra attributes for the opening ``<table>`` tag, added
    alongside the automatic (by default) id.

    Parameters
    ----------
    attributes : string

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_attributes('class="pure-table"')
    # ... <table class="pure-table"> ...
    """
    self.table_attributes = attributes
    return self
def export(self):
    """
    Export the queued style functions applied to this Styler, for
    replay on a second one via ``Styler.use``.

    Returns
    -------
    styles : list

    See Also
    --------
    Styler.use
    """
    return self._todo
def use(self, styles):
    """
    Append style functions to this Styler, typically ones obtained
    from another Styler's ``export``.

    Parameters
    ----------
    styles : list
        list of style functions

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.export
    """
    self._todo.extend(styles)
    return self
def set_uuid(self, uuid):
    """
    Set the uuid used to namespace this table's CSS ids.

    Parameters
    ----------
    uuid : str

    Returns
    -------
    self : Styler
    """
    self.uuid = uuid
    return self
def set_caption(self, caption):
    """
    Set the caption rendered with the table.

    Parameters
    ----------
    caption : str

    Returns
    -------
    self : Styler
    """
    self.caption = caption
    return self
def set_table_styles(self, table_styles):
    """
    Set table-level styles, emitted in a ``<style>`` tag before the
    generated HTML table.

    Parameters
    ----------
    table_styles : list
        Each entry is a dict with ``selector`` and ``props`` keys:
        ``selector`` is a CSS selector (automatically prefixed by the
        table's UUID) and ``props`` a list of (attribute, value)
        tuples.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_styles(
    ...     [{'selector': 'tr:hover',
    ...       'props': [('background-color', 'yellow')]}]
    ... )
    """
    self.table_styles = table_styles
    return self
def hide_index(self):
    """
    Hide the index from rendering.

    .. versionadded:: 0.23.0

    Returns
    -------
    self : Styler
    """
    self.hidden_index = True
    return self
def hide_columns(self, subset):
    """
    Hide the selected columns from rendering.

    .. versionadded:: 0.23.0

    Parameters
    ----------
    subset : IndexSlice
        An argument to ``DataFrame.loc`` identifying the columns to
        hide.

    Returns
    -------
    self : Styler
    """
    hidden_df = self.data.loc[_non_reducing_slice(subset)]
    self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
    return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
    """
    Shade the background ``null_color`` for missing values.

    Parameters
    ----------
    null_color : str

    Returns
    -------
    self : Styler
    """
    self.applymap(self._highlight_null, null_color=null_color)
    return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                        subset=None, text_color_threshold=0.408):
    """
    Color the background in a gradient according to the data in each
    column (optionally row).  Requires matplotlib.

    Parameters
    ----------
    cmap : str or colormap
        matplotlib colormap
    low, high : float
        compress the range by these values.
    axis : int or str
        1 or 'columns' for columnwise, 0 or 'index' for rowwise
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    text_color_threshold : float or int
        luminance threshold (0 to 1) used to pick a light or dark text
        color so labels stay legible on any background.
        0 = all text dark, 1 = all text light.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.

    Notes
    -----
    Tune ``text_color_threshold`` or ``low``/``high`` to keep the text
    legible; the data range is extended by ``low * (x.max() - x.min())``
    and ``high * (x.max() - x.min())`` before normalizing.
    """
    subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._background_gradient, cmap=cmap, subset=subset,
               axis=axis, low=low, high=high,
               text_color_threshold=text_color_threshold)
    return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.

    Values are mapped through ``cmap`` after normalizing to the
    (optionally extended) data range, and each cell gets a light or
    dark text color depending on the background's relative luminance.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)

    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            color : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            # BUGFIX: the sRGB linearization is ((x + 0.055) / 1.055) ** 2.4.
            # The previous code computed x / (1.055 ** 2.4) because ** binds
            # tighter than /, skewing luminance and thus text-color choice.
            r, g, b = (
                x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )

        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
def set_properties(self, subset=None, **kwargs):
    """
    Convenience method: set one or more data-independent CSS
    properties on every cell (of ``subset``).

    Parameters
    ----------
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    kwargs : dict
        property: value pairs to be set for each cell

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    css = ';'.join('{p}: {v}'.format(p=p, v=v)
                   for p, v in kwargs.items())
    return self.applymap(lambda x: css, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
"""
Generate CSS code to draw a bar from start to end.
"""
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
)
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar charts in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : int, str or None, default 0
        Apply per column (0/'index'), per row (1/'columns'), or to the
        whole frame at once (None).
    color : str or 2-tuple/list
        One color for all bars, or ``[color_negative, color_positive]``
        (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        The largest value covers `width` percent of the cell's width.
    align : {'left', 'zero',' mid'}, default 'left'
        'left': the minimum starts at the left of the cell; 'zero':
        zero sits at the center of the cell; 'mid': the cell center is
        (max-min)/2, with zero pinned to an edge when all values share
        a sign.

        .. versionadded:: 0.20.0

    vmin, vmax : float, optional
        Clip the bar drawing range; the data min/max is used when None.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero',' mid'}")

    # Normalize ``color`` into a [negative, positive] pair.
    if not (is_list_like(color)):
        color = [color, color]
    elif len(color) == 1:
        color = [color[0], color[0]]
    elif len(color) > 2:
        raise ValueError("`color` must be string or a list-like"
                         " of length 2: [`color_neg`, `color_pos`]"
                         " (eg: color=['#d65f5f', '#5fba7d'])")

    subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._bar, subset=subset, axis=axis,
               align=align, colors=color, width=width,
               vmin=vmin, vmax=vmax)
    return self
def highlight_max(self, subset=None, color='yellow', axis=0):
    """
    Highlight the maximum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """
    Highlight the minimum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
                       max_=True):
    """Shared driver for highlight_max / highlight_min."""
    numeric = _maybe_numeric_slice(self.data, subset)
    subset = _non_reducing_slice(numeric)
    self.apply(self._highlight_extrema, color=color, axis=axis,
               subset=subset, max_=max_)
    return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .tee
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
    """
    Factory function for creating a subclass of ``Styler`` with a
    custom template and Jinja environment.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates
    name : str
        Name of your custom template to use for rendering

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    # Look in the user's directories first, then fall back to the
    # templates bundled with pandas.
    custom_loader = ChoiceLoader([
        FileSystemLoader(searchpath),
        cls.loader,
    ])

    class MyStyler(cls):
        env = Environment(loader=custom_loader)
        template = env.get_template(name)

    return MyStyler
    def pipe(self, func, *args, **kwargs):
        """
        Apply ``func(self, *args, **kwargs)``, and return the result.

        .. versionadded:: 0.24.0

        Parameters
        ----------
        func : function
            Function to apply to the Styler.  Alternatively, a
            ``(callable, keyword)`` tuple where ``keyword`` is a string
            indicating the keyword of ``callable`` that expects the Styler.
        *args, **kwargs :
            Arguments passed to `func`.

        Returns
        -------
        object :
            The value returned by ``func``.

        See Also
        --------
        DataFrame.pipe : Analogous method for DataFrame.
        Styler.apply : Apply a function row-wise, column-wise, or table-wise to
            modify the dataframe's styling.

        Notes
        -----
        Like :meth:`DataFrame.pipe`, this method can simplify the
        application of several user-defined functions to a styler.  Instead
        of writing:

        .. code-block:: python

            f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)

        users can write:

        .. code-block:: python

            (df.style.set_precision(3)
               .pipe(g, arg1=a)
               .pipe(f, arg2=b, arg3=c))

        In particular, this allows users to define functions that take a
        styler object, along with other parameters, and return the styler after
        making styling changes (such as calling :meth:`Styler.apply` or
        :meth:`Styler.set_properties`).  Using ``.pipe``, these user-defined
        style "transformations" can be interleaved with calls to the built-in
        Styler interface.

        Examples
        --------
        >>> def format_conversion(styler):
        ...     return (styler.set_properties(**{'text-align': 'right'})
        ...                   .format({'conversion': '{:.1%}'}))

        The user-defined ``format_conversion`` function above can be called
        within a sequence of other style modifications:

        >>> df = pd.DataFrame({'trial': list(range(5)),
        ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
        >>> (df.style
        ...    .highlight_min(subset=['conversion'], color='yellow')
        ...    .pipe(format_conversion)
        ...    .set_caption("Results with minimum conversion highlighted."))
        """
        # shared helper also used by DataFrame.pipe; handles the
        # (callable, keyword) tuple form
        return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, initial_position): span
    """
    # sentinel marks sparsified (repeated) labels in a MultiIndex
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)

    if hidden_elements is None:
        hidden_elements = []

    lengths = {}
    if index.nlevels == 1:
        # flat index: every visible label spans exactly one cell
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths

    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # a new, visible label starts a fresh span
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if it's hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # sparsified continuation of a visible label: extend its span
                lengths[(i, last_label)] += 1

    # spans of 0 correspond to fully-hidden labels; drop them
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}

    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """
    Normalize ``formatter`` to a callable.

    A template string is wrapped so that ``formatter.format(x)`` is applied;
    a callable is returned unchanged; anything else raises TypeError.
    """
    if callable(formatter):
        return formatter
    if is_string_like(formatter):
        return lambda x: formatter.format(x)
    msg = ("Expected a template string or callable, got {formatter} "
           "instead".format(formatter=formatter))
    raise TypeError(msg)
|
def render(self, **kwargs):
"""
Render the built up styles to HTML.
Parameters
----------
`**kwargs` : Any additional keyword arguments are passed through
to ``self.template.render``. This is useful when you need to provide
additional variables for a custom template.
.. versionadded:: 0.20
Returns
-------
rendered : str
The rendered HTML
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
"""
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
# so we have the neested anys below
trimmed = [x for x in d['cellstyle']
if any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
| 421
| 469
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    """
    Yield ``(plt, colors)`` when matplotlib is importable; otherwise raise
    ImportError naming ``func`` as the feature that requires it.
    """
    if has_mpl:
        yield plt, colors
    else:
        raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
    def __init__(self, data, precision=None, table_styles=None, uuid=None,
                 caption=None, table_attributes=None, cell_ids=True):
        # ctx maps (row, col) -> list of 'property: value' CSS strings
        # accumulated by _update_ctx; _todo holds deferred style functions
        self.ctx = defaultdict(list)
        self._todo = []

        if not isinstance(data, (pd.Series, pd.DataFrame)):
            raise TypeError("``data`` must be a Series or DataFrame")
        if data.ndim == 1:
            # normalize Series input to a one-column DataFrame
            data = data.to_frame()
        if not data.index.is_unique or not data.columns.is_unique:
            raise ValueError("style is not supported for non-unique indices.")

        self.data = data
        self.index = data.index
        self.columns = data.columns

        self.uuid = uuid
        self.table_styles = table_styles
        self.caption = caption
        if precision is None:
            precision = get_option('display.precision')
        self.precision = precision
        self.table_attributes = table_attributes
        self.hidden_index = False
        self.hidden_columns = []
        self.cell_ids = cell_ids

        # display_funcs maps (row, col) -> formatting function
        def default_display_func(x):
            if is_float(x):
                return '{:>.{precision}g}'.format(x, precision=self.precision)
            else:
                return x

        self._display_funcs = defaultdict(lambda: default_display_func)
    def _repr_html_(self):
        """
        Hooks into Jupyter notebook rich display system.
        """
        return self.render()
    @Appender(_shared_docs['to_excel'] % dict(
        axes='index, columns', klass='Styler',
        axes_single_arg="{0 or 'index', 1 or 'columns'}",
        optional_by="""
            by : str or list of str
                Name or list of names which refer to the axis items.""",
        versionadded_to_excel='\n    .. versionadded:: 0.20'))
    def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
                 float_format=None, columns=None, header=True, index=True,
                 index_label=None, startrow=0, startcol=0, engine=None,
                 merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
                 freeze_panes=None):
        # Delegate to ExcelFormatter; passing ``self`` (a Styler) lets the
        # formatter pick up the computed styles when writing cells.
        from pandas.io.formats.excel import ExcelFormatter
        formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                                   header=header,
                                   float_format=float_format, index=index,
                                   index_label=index_label,
                                   merge_cells=merge_cells,
                                   inf_rep=inf_rep)
        formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                        startcol=startcol, freeze_panes=freeze_panes,
                        engine=engine)
    def _translate(self):
        """
        Convert the DataFrame in `self.data` and the attrs from `_build_styles`
        into a dictionary of {head, body, uuid, cellstyle} consumed by the
        Jinja template in ``render``.
        """
        table_styles = self.table_styles or []
        caption = self.caption
        ctx = self.ctx
        precision = self.precision
        hidden_index = self.hidden_index
        hidden_columns = self.hidden_columns
        # dashes are not valid in CSS identifiers, so normalize the uuid
        uuid = self.uuid or str(uuid1()).replace("-", "_")
        ROW_HEADING_CLASS = "row_heading"
        COL_HEADING_CLASS = "col_heading"
        INDEX_NAME_CLASS = "index_name"

        DATA_CLASS = "data"
        BLANK_CLASS = "blank"
        BLANK_VALUE = ""

        def format_attr(pair):
            return "{key}={value}".format(**pair)

        # for sparsifying a MultiIndex
        idx_lengths = _get_level_lengths(self.index)
        col_lengths = _get_level_lengths(self.columns, hidden_columns)

        cell_context = dict()

        n_rlvls = self.data.index.nlevels
        n_clvls = self.data.columns.nlevels
        rlabels = self.data.index.tolist()
        clabels = self.data.columns.tolist()

        # normalize flat indexes to one-label-per-level lists so row/column
        # labels can be handled uniformly with the MultiIndex case
        if n_rlvls == 1:
            rlabels = [[x] for x in rlabels]
        if n_clvls == 1:
            clabels = [[x] for x in clabels]
        clabels = list(zip(*clabels))

        cellstyle = []
        head = []

        for r in range(n_clvls):
            # Blank for Index columns...
            row_es = [{"type": "th",
                       "value": BLANK_VALUE,
                       "display_value": BLANK_VALUE,
                       "is_visible": not hidden_index,
                       "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)

            # ... except maybe the last for columns.names
            name = self.data.columns.names[r]
            cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=r)]
            name = BLANK_VALUE if name is None else name
            row_es.append({"type": "th",
                           "value": name,
                           "display_value": name,
                           "class": " ".join(cs),
                           "is_visible": not hidden_index})

            if clabels:
                for c, value in enumerate(clabels[r]):
                    cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                          "col{col}".format(col=c)]
                    cs.extend(cell_context.get(
                        "col_headings", {}).get(r, {}).get(c, []))
                    es = {
                        "type": "th",
                        "value": value,
                        "display_value": value,
                        "class": " ".join(cs),
                        "is_visible": _is_visible(c, r, col_lengths),
                    }
                    # colspan > 1 means this label spans sparsified neighbors
                    colspan = col_lengths.get((r, c), 0)
                    if colspan > 1:
                        es["attributes"] = [
                            format_attr({"key": "colspan", "value": colspan})
                        ]
                    row_es.append(es)
                head.append(row_es)

        # optional extra header row for the index names
        if (self.data.index.names and
                com._any_not_none(*self.data.index.names) and
                not hidden_index):
            index_header_row = []

            for c, name in enumerate(self.data.index.names):
                cs = [INDEX_NAME_CLASS,
                      "level{lvl}".format(lvl=c)]
                name = '' if name is None else name
                index_header_row.append({"type": "th", "value": name,
                                         "class": " ".join(cs)})

            index_header_row.extend(
                [{"type": "th",
                  "value": BLANK_VALUE,
                  "class": " ".join([BLANK_CLASS])
                  }] * (len(clabels[0]) - len(hidden_columns)))

            head.append(index_header_row)

        body = []
        for r, idx in enumerate(self.data.index):
            row_es = []
            # row heading cells (index labels), one per index level
            for c, value in enumerate(rlabels[r]):
                rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                       "row{row}".format(row=r)]
                es = {
                    "type": "th",
                    "is_visible": (_is_visible(r, c, idx_lengths) and
                                   not hidden_index),
                    "value": value,
                    "display_value": value,
                    "id": "_".join(rid[1:]),
                    "class": " ".join(rid)
                }
                rowspan = idx_lengths.get((c, r), 0)
                if rowspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "rowspan", "value": rowspan})
                    ]
                row_es.append(es)

            # data cells
            for c, col in enumerate(self.data.columns):
                cs = [DATA_CLASS, "row{row}".format(row=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
                formatter = self._display_funcs[(r, c)]
                value = self.data.iloc[r, c]
                row_dict = {"type": "td",
                            "value": value,
                            "class": " ".join(cs),
                            "display_value": formatter(value),
                            "is_visible": (c not in hidden_columns)}
                # only add an id if the cell has a style
                if (self.cell_ids or
                        not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                    row_dict["id"] = "_".join(cs[1:])
                row_es.append(row_dict)
                props = []
                for x in ctx[r, c]:
                    # have to handle empty styles like ['']
                    if x.count(":"):
                        props.append(x.split(":"))
                    else:
                        props.append(['', ''])
                cellstyle.append({'props': props,
                                  'selector': "row{row}_col{col}"
                                  .format(row=r, col=c)})
            body.append(row_es)

        table_attr = self.table_attributes
        use_mathjax = get_option("display.html.use_mathjax")
        if not use_mathjax:
            # tag the table so MathJax skips it in the notebook
            table_attr = table_attr or ''
            if 'class="' in table_attr:
                table_attr = table_attr.replace('class="',
                                                'class="tex2jax_ignore ')
            else:
                table_attr += ' class="tex2jax_ignore"'

        return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                    precision=precision, table_styles=table_styles,
                    caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
    def render(self, **kwargs):
        """
        Render the built up styles to HTML.

        Parameters
        ----------
        `**kwargs` : Any additional keyword arguments are passed through
            to ``self.template.render``. This is useful when you need to provide
            additional variables for a custom template.

            .. versionadded:: 0.20

        Returns
        -------
        rendered : str
            The rendered HTML

        Notes
        -----
        ``Styler`` objects have defined the ``_repr_html_`` method
        which automatically calls ``self.render()`` when it's the
        last item in a Notebook cell. When calling ``Styler.render()``
        directly, wrap the result in ``IPython.display.HTML`` to view
        the rendered HTML in the notebook.

        Pandas uses the following keys in render. Arguments passed
        in ``**kwargs`` take precedence, so think carefully if you want
        to override them:

        * head
        * cellstyle
        * body
        * uuid
        * precision
        * table_styles
        * caption
        * table_attributes
        """
        # run all queued style functions before translating to template vars
        self._compute()
        # TODO: namespace all the pandas keys
        d = self._translate()
        # filter out empty styles, every cell will have a class
        # but the list of props may just be [['', '']].
        # so we have the nested anys below
        trimmed = [x for x in d['cellstyle']
                   if any(any(y) for y in x['props'])]
        d['cellstyle'] = trimmed
        d.update(kwargs)
        return self.template.render(**d)
    def _update_ctx(self, attrs):
        """
        Update the state of the Styler.

        Collects a mapping of {index_label: ['<property>: <value>']}.

        attrs : Series or DataFrame
            should contain strings of '<property>: <value>;<prop2>: <val2>'
            Whitespace shouldn't matter and the final trailing ';' shouldn't
            matter.
        """
        for row_label, v in attrs.iterrows():
            for col_label, col in v.iteritems():
                # translate labels to positional keys for self.ctx
                i = self.index.get_indexer([row_label])[0]
                j = self.columns.get_indexer([col_label])[0]
                for pair in col.rstrip(";").split(";"):
                    self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
    def __copy__(self):
        """
        Shallow copy: the new Styler shares ``ctx`` and ``_todo``
        with the original (use ``copy.deepcopy`` for independence).
        """
        return self._copy(deepcopy=False)
    def __deepcopy__(self, memo):
        """Deep copy: styles and queued functions are fully independent."""
        return self._copy(deepcopy=True)
    def clear(self):
        """
        Reset the styler, removing any previously applied styles.
        Returns None.
        """
        self.ctx.clear()
        self._todo = []
    def _compute(self):
        """
        Execute the style functions built up in `self._todo`.

        Relies on the convention that all style functions go through
        .apply or .applymap, which append the styles to apply as tuples of
        (application method, *args, **kwargs).
        """
        r = self
        for func, args, kwargs in self._todo:
            # func(self) resolves the bound method (e.g. _apply), which
            # mutates self.ctx and returns self
            r = func(self)(*args, **kwargs)
        return r
    def _apply(self, func, axis=0, subset=None, **kwargs):
        """
        Backend for ``apply``: run ``func`` over ``subset`` along ``axis``,
        validate the result's shape/labels, and merge it into ``self.ctx``.
        """
        subset = slice(None) if subset is None else subset
        subset = _non_reducing_slice(subset)
        data = self.data.loc[subset]
        if axis is not None:
            result = data.apply(func, axis=axis,
                                result_type='expand', **kwargs)
            result.columns = data.columns
        else:
            # axis=None: func receives the whole (subset) DataFrame and must
            # return a DataFrame with identical labels
            result = func(data, **kwargs)
            if not isinstance(result, pd.DataFrame):
                raise TypeError(
                    "Function {func!r} must return a DataFrame when "
                    "passed to `Styler.apply` with axis=None"
                    .format(func=func))
            if not (result.index.equals(data.index) and
                    result.columns.equals(data.columns)):
                msg = ('Result of {func!r} must have identical index and '
                       'columns as the input'.format(func=func))
                raise ValueError(msg)

        result_shape = result.shape
        expected_shape = self.data.loc[subset].shape
        if result_shape != expected_shape:
            msg = ("Function {func!r} returned the wrong shape.\n"
                   "Result has shape: {res}\n"
                   "Expected shape:   {expect}".format(func=func,
                                                       res=result.shape,
                                                       expect=expected_shape))
            raise ValueError(msg)
        self._update_ctx(result)
        return self
    def apply(self, func, axis=0, subset=None, **kwargs):
        """
        Apply a function column-wise, row-wise, or table-wise,
        updating the HTML representation with the result.

        Parameters
        ----------
        func : function
            ``func`` should take a Series or DataFrame (depending
            on ``axis``), and return an object with the same shape.
            Must return a DataFrame with identical index and
            column labels when ``axis=None``
        axis : int, str or None
            apply to each column (``axis=0`` or ``'index'``)
            or to each row (``axis=1`` or ``'columns'``) or
            to the entire DataFrame at once with ``axis=None``
        subset : IndexSlice
            a valid indexer to limit ``data`` to *before* applying the
            function. Consider using a pandas.IndexSlice
        kwargs : dict
            pass along to ``func``

        Returns
        -------
        self : Styler

        Notes
        -----
        The output shape of ``func`` should match the input, i.e. if
        ``x`` is the input row, column, or table (depending on ``axis``),
        then ``func(x).shape == x.shape`` should be true.

        This is similar to ``DataFrame.apply``, except that ``axis=None``
        applies the function to the entire DataFrame at once,
        rather than column-wise or row-wise.

        Examples
        --------
        >>> def highlight_max(x):
        ...     return ['background-color: yellow' if v == x.max() else ''
        ...             for v in x]
        ...
        >>> df = pd.DataFrame(np.random.randn(5, 2))
        >>> df.style.apply(highlight_max)
        """
        # execution is deferred until render(); see _compute
        self._todo.append((lambda instance: getattr(instance, '_apply'),
                           (func, axis, subset), kwargs))
        return self
    def _applymap(self, func, subset=None, **kwargs):
        """
        Backend for ``applymap``: apply ``func`` elementwise over ``subset``
        and merge the resulting CSS strings into ``self.ctx``.
        """
        # DataFrame.applymap takes no kwargs, so bind them into func up front
        func = partial(func, **kwargs)  # applymap doesn't take kwargs?
        if subset is None:
            subset = pd.IndexSlice[:]
        subset = _non_reducing_slice(subset)
        result = self.data.loc[subset].applymap(func)
        self._update_ctx(result)
        return self
    def applymap(self, func, subset=None, **kwargs):
        """
        Apply a function elementwise, updating the HTML
        representation with the result.

        Parameters
        ----------
        func : function
            ``func`` should take a scalar and return a scalar
        subset : IndexSlice
            a valid indexer to limit ``data`` to *before* applying the
            function. Consider using a pandas.IndexSlice
        kwargs : dict
            pass along to ``func``

        Returns
        -------
        self : Styler

        See Also
        --------
        Styler.where
        """
        # execution is deferred until render(); see _compute
        self._todo.append((lambda instance: getattr(instance, '_applymap'),
                           (func, subset), kwargs))
        return self
def where(self, cond, value, other=None, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
    def set_precision(self, precision):
        """
        Set the precision used to render.

        Parameters
        ----------
        precision : int

        Returns
        -------
        self : Styler
        """
        self.precision = precision
        return self
    def set_table_attributes(self, attributes):
        """
        Set the table attributes.

        These are the items that show up in the opening ``<table>`` tag
        in addition to the automatic (by default) id.

        Parameters
        ----------
        attributes : string

        Returns
        -------
        self : Styler

        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4))
        >>> df.style.set_table_attributes('class="pure-table"')
        # ... <table class="pure-table"> ...
        """
        self.table_attributes = attributes
        return self
    def export(self):
        """
        Export the styles applied to the current Styler.

        Can be applied to a second style with ``Styler.use``.

        Returns
        -------
        styles : list

        See Also
        --------
        Styler.use
        """
        # _todo holds the queued (not yet computed) style functions
        return self._todo
    def use(self, styles):
        """
        Set the styles on the current Styler, possibly using styles
        from ``Styler.export``.

        Parameters
        ----------
        styles : list
            list of style functions

        Returns
        -------
        self : Styler

        See Also
        --------
        Styler.export
        """
        self._todo.extend(styles)
        return self
    def set_uuid(self, uuid):
        """
        Set the uuid for a Styler.

        Parameters
        ----------
        uuid : str

        Returns
        -------
        self : Styler
        """
        self.uuid = uuid
        return self
    def set_caption(self, caption):
        """
        Set the caption on a Styler.

        Parameters
        ----------
        caption : str

        Returns
        -------
        self : Styler
        """
        self.caption = caption
        return self
    def set_table_styles(self, table_styles):
        """
        Set the table styles on a Styler.

        These are placed in a ``<style>`` tag before the generated HTML table.

        Parameters
        ----------
        table_styles : list
            Each individual table_style should be a dictionary with
            ``selector`` and ``props`` keys. ``selector`` should be a CSS
            selector that the style will be applied to (automatically
            prefixed by the table's UUID) and ``props`` should be a list of
            tuples with ``(attribute, value)``.

        Returns
        -------
        self : Styler

        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4))
        >>> df.style.set_table_styles(
        ...     [{'selector': 'tr:hover',
        ...       'props': [('background-color', 'yellow')]}]
        ... )
        """
        self.table_styles = table_styles
        return self
    def hide_index(self):
        """
        Hide any indices from rendering.

        .. versionadded:: 0.23.0

        Returns
        -------
        self : Styler
        """
        self.hidden_index = True
        return self
    def hide_columns(self, subset):
        """
        Hide columns from rendering.

        .. versionadded:: 0.23.0

        Parameters
        ----------
        subset : IndexSlice
            An argument to ``DataFrame.loc`` that identifies which columns
            are hidden.

        Returns
        -------
        self : Styler
        """
        subset = _non_reducing_slice(subset)
        hidden_df = self.data.loc[subset]
        # store positional indices; _translate checks membership by position
        self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
        return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
    def highlight_null(self, null_color='red'):
        """
        Shade the background ``null_color`` for missing values.

        Parameters
        ----------
        null_color : str

        Returns
        -------
        self : Styler
        """
        self.applymap(self._highlight_null, null_color=null_color)
        return self
    def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                            subset=None, text_color_threshold=0.408):
        """
        Color the background in a gradient according to
        the data in each column (optionally row).

        Requires matplotlib.

        Parameters
        ----------
        cmap : str or colormap
            matplotlib colormap
        low, high : float
            compress the range by these values.
        axis : int or str
            1 or 'columns' for columnwise, 0 or 'index' for rowwise
        subset : IndexSlice
            a valid slice for ``data`` to limit the style application to
        text_color_threshold : float or int
            luminance threshold for determining text color. Facilitates text
            visibility across varying background colors. From 0 to 1.
            0 = all text is dark colored, 1 = all text is light colored.

            .. versionadded:: 0.24.0

        Returns
        -------
        self : Styler

        Raises
        ------
        ValueError
            If ``text_color_threshold`` is not a value from 0 to 1.

        Notes
        -----
        Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
        text legible by not using the entire range of the color map. The range
        of the data is extended by ``low * (x.max() - x.min())`` and ``high *
        (x.max() - x.min())`` before normalizing.
        """
        # gradients only make sense for numeric data
        subset = _maybe_numeric_slice(self.data, subset)
        subset = _non_reducing_slice(subset)
        self.apply(self._background_gradient, cmap=cmap, subset=subset,
                   axis=axis, low=low, high=high,
                   text_color_threshold=text_color_threshold)
        return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""
Color background in a range according to the data.
"""
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
)
def set_properties(self, subset=None, **kwargs):
"""
Convenience method for setting one or more non-data dependent
properties or each cell.
Parameters
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs : dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = ';'.join('{p}: {v}'.format(p=p, v=v)
for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
    @staticmethod
    def _bar(s, align, colors, width=100, vmin=None, vmax=None):
        """
        Draw bar chart in dataframe cells.

        Returns CSS linear-gradient strings (list for Series input,
        DataFrame for DataFrame input).
        """
        # Get input value range.
        smin = s.min() if vmin is None else vmin
        if isinstance(smin, ABCSeries):
            smin = smin.min()
        smax = s.max() if vmax is None else vmax
        if isinstance(smax, ABCSeries):
            smax = smax.max()
        if align == 'mid':
            smin = min(0, smin)
            smax = max(0, smax)
        elif align == 'zero':
            # For "zero" mode, we want the range to be symmetrical around zero.
            smax = max(abs(smin), abs(smax))
            smin = -smax
        # Transform to percent-range of linear-gradient
        # (+1e-12 guards against division by zero for constant data)
        normed = width * (s.values - smin) / (smax - smin + 1e-12)
        zero = -width * smin / (smax - smin + 1e-12)

        def css_bar(start, end, color):
            """
            Generate CSS code to draw a bar from start to end.
            """
            css = 'width: 10em; height: 80%;'
            if end > start:
                css += 'background: linear-gradient(90deg,'
                if start > 0:
                    css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                        s=start, c=color
                    )
                css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                    e=min(end, width), c=color,
                )
            return css

        def css(x):
            if pd.isna(x):
                return ''

            # avoid deprecated indexing `colors[x > zero]`
            color = colors[1] if x > zero else colors[0]

            if align == 'left':
                return css_bar(0, x, color)
            else:
                return css_bar(min(x, zero), max(x, zero), color)

        if s.ndim == 1:
            return [css(x) for x in normed]
        else:
            return pd.DataFrame(
                [[css(x) for x in row] for row in normed],
                index=s.index, columns=s.columns
            )
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
align='left', vmin=None, vmax=None):
"""
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : int, str or None, default 0
Apply to each column (`axis=0` or `'index'`)
or to each row (`axis=1` or `'columns'`) or
to the entire DataFrame at once with `axis=None`.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 or 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero',' mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
"""
if align not in ('left', 'zero', 'mid'):
raise ValueError("`align` must be one of {'left', 'zero',' mid'}")
if not (is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
raise ValueError("`color` must be string or a list-like"
" of length 2: [`color_neg`, `color_pos`]"
" (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis,
align=align, colors=color, width=width,
vmin=vmin, vmax=vmax)
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
"""
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
"""
Highlight the minimum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
max_=True):
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(self._highlight_extrema, color=color, axis=axis,
subset=subset, max_=max_)
return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .tee
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
"""
Factory function for creating a subclass of ``Styler``
with a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates
name : str
Name of your custom template to use for rendering
Returns
-------
MyStyler : subclass of Styler
Has the correct ``env`` and ``template`` class attributes set.
"""
loader = ChoiceLoader([
FileSystemLoader(searchpath),
cls.loader,
])
class MyStyler(cls):
env = Environment(loader=loader)
template = env.get_template(name)
return MyStyler
    def pipe(self, func, *args, **kwargs):
        """
        Apply ``func(self, *args, **kwargs)``, and return the result.

        .. versionadded:: 0.24.0

        Parameters
        ----------
        func : function
            Function to apply to the Styler. Alternatively, a
            ``(callable, keyword)`` tuple where ``keyword`` is a string
            indicating the keyword of ``callable`` that expects the Styler.
        *args, **kwargs :
            Arguments passed to `func`.

        Returns
        -------
        object :
            The value returned by ``func``.

        See Also
        --------
        DataFrame.pipe : Analogous method for DataFrame.
        Styler.apply : Apply a function row-wise, column-wise, or table-wise to
            modify the dataframe's styling.

        Notes
        -----
        Like :meth:`DataFrame.pipe`, this method can simplify the
        application of several user-defined functions to a styler. Instead
        of writing:

        .. code-block:: python

            f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)

        users can write:

        .. code-block:: python

            (df.style.set_precision(3)
               .pipe(g, arg1=a)
               .pipe(f, arg2=b, arg3=c))

        In particular, this allows users to define functions that take a
        styler object, along with other parameters, and return the styler after
        making styling changes (such as calling :meth:`Styler.apply` or
        :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined
        style "transformations" can be interleaved with calls to the built-in
        Styler interface.

        Examples
        --------
        >>> def format_conversion(styler):
        ...     return (styler.set_properties(**{'text-align': 'right'})
        ...                   .format({'conversion': '{:.1%}'}))

        The user-defined ``format_conversion`` function above can be called
        within a sequence of other style modifications:

        >>> df = pd.DataFrame({'trial': list(range(5)),
        ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
        >>> (df.style
        ...    .highlight_min(subset=['conversion'], color='yellow')
        ...    .pipe(format_conversion)
        ...    .set_caption("Results with minimum conversion highlighted."))
        """
        # com._pipe handles both a plain callable and the
        # (callable, keyword) tuple form described above.
        return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
    """
    Return True when the header cell at this position is rendered, i.e.
    it is the anchor of a (possibly spanning) cell in ``lengths``.

    ``lengths`` comes from ``_get_level_lengths`` and is keyed by
    ``(level, position)`` — hence the deliberately swapped order of the
    two indices in the membership test below.
    """
    return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, initial_position): span
    """
    # The sentinel marks positions that index.format() sparsified away,
    # i.e. positions covered by the span of an earlier label.
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)
    if hidden_elements is None:
        hidden_elements = []
    lengths = {}
    if index.nlevels == 1:
        # flat index: every visible element spans exactly one cell
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths
    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                # sparsification disabled: one cell per label
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # a visible anchor label starts a new span
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # sparsified position extends the current anchor's span
                lengths[(i, last_label)] += 1
    # drop spans that ended up fully hidden
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}
    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """
    Coerce ``formatter`` into a callable.

    A template string is wrapped so it is applied as ``formatter.format(x)``;
    a callable passes through unchanged; anything else raises ``TypeError``.
    """
    if is_string_like(formatter):
        return lambda x: formatter.format(x)
    if callable(formatter):
        return formatter
    msg = ("Expected a template string or callable, got {formatter} "
           "instead".format(formatter=formatter))
    raise TypeError(msg)
# _compute
# Execute the style functions built up in `self._todo`.
# Relies on the conventions that all style functions go through
# .apply or .applymap, which append the styles to apply as tuples of
# (application method, *args, **kwargs).
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    """
    Yield ``(plt, colors)`` from matplotlib, or raise an ImportError
    naming ``func`` when matplotlib is not installed.
    """
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, colors
class Styler(object):
    """
    Helps style a DataFrame or Series according to the data with HTML and CSS.

    Parameters
    ----------
    data : Series or DataFrame
    precision : int
        precision to round floats to, defaults to pd.options.display.precision
    table_styles : list-like, default None
        list of {selector: (attr, value)} dicts; see Notes
    uuid : str, default None
        a unique identifier to avoid CSS collisions; generated automatically
    caption : str, default None
        caption to attach to the table
    cell_ids : bool, default True
        If True, each cell will have an ``id`` attribute in their HTML tag.
        The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
        where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
        number and ``<num_col>`` is the column number.

    Attributes
    ----------
    env : Jinja2 Environment
    template : Jinja2 Template
    loader : Jinja2 Loader

    See Also
    --------
    pandas.DataFrame.style

    Notes
    -----
    Most styling will be done by passing style functions into
    ``Styler.apply`` or ``Styler.applymap``. Style functions should
    return values with strings containing CSS ``'attr: value'`` that will
    be applied to the indicated cells.

    If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
    to automatically render itself. Otherwise call Styler.render to get
    the generated HTML.

    CSS classes are attached to the generated HTML

    * Index and Column names include ``index_name`` and ``level<k>``
      where `k` is its level in a MultiIndex
    * Index label cells include

      * ``row_heading``
      * ``row<n>`` where `n` is the numeric position of the row
      * ``level<k>`` where `k` is the level in a MultiIndex

    * Column label cells include

      * ``col_heading``
      * ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex

    * Blank cells include ``blank``
    * Data cells include ``data``
    """
    # Shared Jinja2 machinery: all instances render through the packaged
    # html.tpl template unless a subclass replaces these class attributes
    # (see ``from_custom_template``).
    loader = PackageLoader("pandas", "io/formats/templates")
    env = Environment(
        loader=loader,
        trim_blocks=True,
    )
    template = env.get_template("html.tpl")
    def __init__(self, data, precision=None, table_styles=None, uuid=None,
                 caption=None, table_attributes=None, cell_ids=True):
        """
        Initialize the Styler; see the class docstring for parameters.

        Raises
        ------
        TypeError
            If ``data`` is not a Series or DataFrame.
        ValueError
            If the index or columns are not unique (positional styling
            would be ambiguous).
        """
        # ctx maps (row, col) -> list of 'attr: value' CSS strings;
        # _todo queues deferred style functions (see _compute).
        self.ctx = defaultdict(list)
        self._todo = []
        if not isinstance(data, (pd.Series, pd.DataFrame)):
            raise TypeError("``data`` must be a Series or DataFrame")
        if data.ndim == 1:
            # normalize a Series to a one-column frame
            data = data.to_frame()
        if not data.index.is_unique or not data.columns.is_unique:
            raise ValueError("style is not supported for non-unique indices.")
        self.data = data
        self.index = data.index
        self.columns = data.columns
        self.uuid = uuid
        self.table_styles = table_styles
        self.caption = caption
        if precision is None:
            precision = get_option('display.precision')
        self.precision = precision
        self.table_attributes = table_attributes
        self.hidden_index = False
        self.hidden_columns = []
        self.cell_ids = cell_ids
        # display_funcs maps (row, col) -> formatting function
        def default_display_func(x):
            # floats render with "general" format at the configured precision
            if is_float(x):
                return '{:>.{precision}g}'.format(x, precision=self.precision)
            else:
                return x
        self._display_funcs = defaultdict(lambda: default_display_func)
    def _repr_html_(self):
        """
        Hooks into Jupyter notebook rich display system, so a Styler left
        as the last expression in a cell renders its HTML automatically.
        """
        return self.render()
    @Appender(_shared_docs['to_excel'] % dict(
        axes='index, columns', klass='Styler',
        axes_single_arg="{0 or 'index', 1 or 'columns'}",
        optional_by="""
            by : str or list of str
                Name or list of names which refer to the axis items.""",
        versionadded_to_excel='\n    .. versionadded:: 0.20'))
    def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
                 float_format=None, columns=None, header=True, index=True,
                 index_label=None, startrow=0, startcol=0, engine=None,
                 merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
                 freeze_panes=None):
        # ExcelFormatter is handed the Styler itself (not just the data),
        # which presumably lets it apply the computed styles to the cells
        # it writes — TODO confirm against ExcelFormatter.
        from pandas.io.formats.excel import ExcelFormatter
        formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                                   header=header,
                                   float_format=float_format, index=index,
                                   index_label=index_label,
                                   merge_cells=merge_cells,
                                   inf_rep=inf_rep)
        formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                        startcol=startcol, freeze_panes=freeze_panes,
                        engine=engine)
    def _translate(self):
        """
        Convert the DataFrame in `self.data` and the computed styles in
        ``self.ctx`` into a dictionary of {head, body, uuid, cellstyle}
        suitable for feeding to the Jinja2 template.
        """
        table_styles = self.table_styles or []
        caption = self.caption
        ctx = self.ctx
        precision = self.precision
        hidden_index = self.hidden_index
        hidden_columns = self.hidden_columns
        # '-' is replaced because the uuid is embedded in CSS identifiers
        uuid = self.uuid or str(uuid1()).replace("-", "_")
        ROW_HEADING_CLASS = "row_heading"
        COL_HEADING_CLASS = "col_heading"
        INDEX_NAME_CLASS = "index_name"
        DATA_CLASS = "data"
        BLANK_CLASS = "blank"
        BLANK_VALUE = ""
        def format_attr(pair):
            # render one HTML attribute, e.g. colspan=2
            return "{key}={value}".format(**pair)
        # for sparsifying a MultiIndex
        idx_lengths = _get_level_lengths(self.index)
        col_lengths = _get_level_lengths(self.columns, hidden_columns)
        cell_context = dict()
        n_rlvls = self.data.index.nlevels
        n_clvls = self.data.columns.nlevels
        rlabels = self.data.index.tolist()
        clabels = self.data.columns.tolist()
        if n_rlvls == 1:
            rlabels = [[x] for x in rlabels]
        if n_clvls == 1:
            clabels = [[x] for x in clabels]
        # transpose so clabels[level] lists the labels of one column level
        clabels = list(zip(*clabels))
        cellstyle = []
        head = []
        # --- column header rows, one per column level ---
        for r in range(n_clvls):
            # Blank for Index columns...
            row_es = [{"type": "th",
                       "value": BLANK_VALUE,
                       "display_value": BLANK_VALUE,
                       "is_visible": not hidden_index,
                       "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
            # ... except maybe the last for columns.names
            name = self.data.columns.names[r]
            cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=r)]
            name = BLANK_VALUE if name is None else name
            row_es.append({"type": "th",
                           "value": name,
                           "display_value": name,
                           "class": " ".join(cs),
                           "is_visible": not hidden_index})
            if clabels:
                for c, value in enumerate(clabels[r]):
                    cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                          "col{col}".format(col=c)]
                    cs.extend(cell_context.get(
                        "col_headings", {}).get(r, {}).get(c, []))
                    es = {
                        "type": "th",
                        "value": value,
                        "display_value": value,
                        "class": " ".join(cs),
                        "is_visible": _is_visible(c, r, col_lengths),
                    }
                    colspan = col_lengths.get((r, c), 0)
                    if colspan > 1:
                        es["attributes"] = [
                            format_attr({"key": "colspan", "value": colspan})
                        ]
                    row_es.append(es)
            head.append(row_es)
        # --- optional extra header row carrying the index names ---
        if (self.data.index.names and
                com._any_not_none(*self.data.index.names) and
                not hidden_index):
            index_header_row = []
            for c, name in enumerate(self.data.index.names):
                cs = [INDEX_NAME_CLASS,
                      "level{lvl}".format(lvl=c)]
                name = '' if name is None else name
                index_header_row.append({"type": "th", "value": name,
                                         "class": " ".join(cs)})
            index_header_row.extend(
                [{"type": "th",
                  "value": BLANK_VALUE,
                  "class": " ".join([BLANK_CLASS])
                  }] * (len(clabels[0]) - len(hidden_columns)))
            head.append(index_header_row)
        # --- body rows: index label cells followed by data cells ---
        body = []
        for r, idx in enumerate(self.data.index):
            row_es = []
            for c, value in enumerate(rlabels[r]):
                rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                       "row{row}".format(row=r)]
                es = {
                    "type": "th",
                    "is_visible": (_is_visible(r, c, idx_lengths) and
                                   not hidden_index),
                    "value": value,
                    "display_value": value,
                    "id": "_".join(rid[1:]),
                    "class": " ".join(rid)
                }
                rowspan = idx_lengths.get((c, r), 0)
                if rowspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "rowspan", "value": rowspan})
                    ]
                row_es.append(es)
            for c, col in enumerate(self.data.columns):
                cs = [DATA_CLASS, "row{row}".format(row=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
                formatter = self._display_funcs[(r, c)]
                value = self.data.iloc[r, c]
                row_dict = {"type": "td",
                            "value": value,
                            "class": " ".join(cs),
                            "display_value": formatter(value),
                            "is_visible": (c not in hidden_columns)}
                # only add an id if the cell has a style
                if (self.cell_ids or
                        not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                    row_dict["id"] = "_".join(cs[1:])
                row_es.append(row_dict)
                props = []
                for x in ctx[r, c]:
                    # have to handle empty styles like ['']
                    if x.count(":"):
                        props.append(x.split(":"))
                    else:
                        props.append(['', ''])
                cellstyle.append({'props': props,
                                  'selector': "row{row}_col{col}"
                                  .format(row=r, col=c)})
            body.append(row_es)
        table_attr = self.table_attributes
        # keep MathJax from re-typesetting the table when disabled
        use_mathjax = get_option("display.html.use_mathjax")
        if not use_mathjax:
            table_attr = table_attr or ''
            if 'class="' in table_attr:
                table_attr = table_attr.replace('class="',
                                                'class="tex2jax_ignore ')
            else:
                table_attr += ' class="tex2jax_ignore"'
        return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                    precision=precision, table_styles=table_styles,
                    caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
    def render(self, **kwargs):
        """
        Render the built up styles to HTML.

        Parameters
        ----------
        `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to provide
        additional variables for a custom template.

            .. versionadded:: 0.20

        Returns
        -------
        rendered : str
            The rendered HTML

        Notes
        -----
        ``Styler`` objects have defined the ``_repr_html_`` method
        which automatically calls ``self.render()`` when it's the
        last item in a Notebook cell. When calling ``Styler.render()``
        directly, wrap the result in ``IPython.display.HTML`` to view
        the rendered HTML in the notebook.

        Pandas uses the following keys in render. Arguments passed
        in ``**kwargs`` take precedence, so think carefully if you want
        to override them:

        * head
        * cellstyle
        * body
        * uuid
        * precision
        * table_styles
        * caption
        * table_attributes
        """
        # run all queued style functions before translating to template data
        self._compute()
        # TODO: namespace all the pandas keys
        d = self._translate()
        # filter out empty styles, every cell will have a class
        # but the list of props may just be [['', '']].
        # so we have the nested anys below
        trimmed = [x for x in d['cellstyle']
                   if any(any(y) for y in x['props'])]
        d['cellstyle'] = trimmed
        # user kwargs win over the pandas-provided template variables
        d.update(kwargs)
        return self.template.render(**d)
def _update_ctx(self, attrs):
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
attrs : Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
    def __copy__(self):
        """
        Shallow copy: the result shares ``ctx`` and ``_todo`` with this
        Styler (see ``_copy`` with ``deepcopy=False``).
        """
        return self._copy(deepcopy=False)
    def __deepcopy__(self, memo):
        """
        Deep copy: ``ctx`` and ``_todo`` are copied recursively so the
        result is fully independent of this Styler.
        """
        return self._copy(deepcopy=True)
    def clear(self):
        """
        Reset the styler, removing any previously applied styles.
        Returns None.
        """
        # drop both the computed styles (ctx) and the queued,
        # not-yet-computed style functions (_todo)
        self.ctx.clear()
        self._todo = []
# MASKED: _compute function (lines 518-530)
    def _apply(self, func, axis=0, subset=None, **kwargs):
        """
        Backend for ``Styler.apply``: run ``func`` over the (sliced) data,
        validate the result's shape, and merge the returned CSS strings
        into ``self.ctx``.
        """
        subset = slice(None) if subset is None else subset
        subset = _non_reducing_slice(subset)
        data = self.data.loc[subset]
        if axis is not None:
            # row-/column-wise: let DataFrame.apply broadcast func
            result = data.apply(func, axis=axis,
                                result_type='expand', **kwargs)
            result.columns = data.columns
        else:
            # table-wise: func must hand back a like-indexed DataFrame
            result = func(data, **kwargs)
            if not isinstance(result, pd.DataFrame):
                raise TypeError(
                    "Function {func!r} must return a DataFrame when "
                    "passed to `Styler.apply` with axis=None"
                    .format(func=func))
            if not (result.index.equals(data.index) and
                    result.columns.equals(data.columns)):
                msg = ('Result of {func!r} must have identical index and '
                       'columns as the input'.format(func=func))
                raise ValueError(msg)
        result_shape = result.shape
        expected_shape = self.data.loc[subset].shape
        if result_shape != expected_shape:
            msg = ("Function {func!r} returned the wrong shape.\n"
                   "Result has shape: {res}\n"
                   "Expected shape: {expect}".format(func=func,
                                                     res=result.shape,
                                                     expect=expected_shape))
            raise ValueError(msg)
        self._update_ctx(result)
        return self
    def apply(self, func, axis=0, subset=None, **kwargs):
        """
        Apply a function column-wise, row-wise, or table-wise,
        updating the HTML representation with the result.

        Parameters
        ----------
        func : function
            ``func`` should take a Series or DataFrame (depending
            on ``axis``), and return an object with the same shape.
            Must return a DataFrame with identical index and
            column labels when ``axis=None``
        axis : int, str or None
            apply to each column (``axis=0`` or ``'index'``)
            or to each row (``axis=1`` or ``'columns'``) or
            to the entire DataFrame at once with ``axis=None``
        subset : IndexSlice
            a valid indexer to limit ``data`` to *before* applying the
            function. Consider using a pandas.IndexSlice
        kwargs : dict
            pass along to ``func``

        Returns
        -------
        self : Styler

        Notes
        -----
        The output shape of ``func`` should match the input, i.e. if
        ``x`` is the input row, column, or table (depending on ``axis``),
        then ``func(x).shape == x.shape`` should be true.

        This is similar to ``DataFrame.apply``, except that ``axis=None``
        applies the function to the entire DataFrame at once,
        rather than column-wise or row-wise.

        Examples
        --------
        >>> def highlight_max(x):
        ...     return ['background-color: yellow' if v == x.max() else ''
                        for v in x]
        ...
        >>> df = pd.DataFrame(np.random.randn(5, 2))
        >>> df.style.apply(highlight_max)
        """
        # Evaluation is deferred: nothing runs until render()/_compute().
        # The stored lambda re-resolves '_apply' on whichever instance is
        # executing (so the queue survives _copy / export / use).
        self._todo.append((lambda instance: getattr(instance, '_apply'),
                           (func, axis, subset), kwargs))
        return self
def _applymap(self, func, subset=None, **kwargs):
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
    def applymap(self, func, subset=None, **kwargs):
        """
        Apply a function elementwise, updating the HTML
        representation with the result.

        Parameters
        ----------
        func : function
            ``func`` should take a scalar and return a scalar
        subset : IndexSlice
            a valid indexer to limit ``data`` to *before* applying the
            function. Consider using a pandas.IndexSlice
        kwargs : dict
            pass along to ``func``

        Returns
        -------
        self : Styler

        See Also
        --------
        Styler.where
        """
        # Deferred like ``apply``: queued here, executed by _compute().
        self._todo.append((lambda instance: getattr(instance, '_applymap'),
                           (func, subset), kwargs))
        return self
def where(self, cond, value, other=None, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
    def set_precision(self, precision):
        """
        Set the precision used to render.

        Parameters
        ----------
        precision : int
            number of digits used by the default float display function
            (set up in ``__init__``)

        Returns
        -------
        self : Styler
        """
        self.precision = precision
        return self
    def set_table_attributes(self, attributes):
        """
        Set the table attributes.

        These are the items that show up in the opening ``<table>`` tag
        in addition to the automatic (by default) id.

        Parameters
        ----------
        attributes : string

        Returns
        -------
        self : Styler

        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4))
        >>> df.style.set_table_attributes('class="pure-table"')
        # ... <table class="pure-table"> ...
        """
        self.table_attributes = attributes
        return self
    def export(self):
        """
        Export the styles applied to the current Styler.

        Can be applied to a second Styler with ``Styler.use``.

        Returns
        -------
        styles : list

        See Also
        --------
        Styler.use
        """
        # NOTE: returns the live _todo list, not a copy — mutations by the
        # receiving Styler are shared with this one.
        return self._todo
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
Parameters
----------
styles : list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
    def set_uuid(self, uuid):
        """
        Set the uuid for a Styler.

        Parameters
        ----------
        uuid : str
            identifier embedded in the generated CSS selectors and cell
            ids to avoid collisions between tables on the same page

        Returns
        -------
        self : Styler
        """
        self.uuid = uuid
        return self
    def set_caption(self, caption):
        """
        Set the caption on a Styler.

        Parameters
        ----------
        caption : str
            text rendered in the table's ``<caption>`` element

        Returns
        -------
        self : Styler
        """
        self.caption = caption
        return self
    def set_table_styles(self, table_styles):
        """
        Set the table styles on a Styler.

        These are placed in a ``<style>`` tag before the generated HTML table.

        Parameters
        ----------
        table_styles : list
            Each individual table_style should be a dictionary with
            ``selector`` and ``props`` keys. ``selector`` should be a CSS
            selector that the style will be applied to (automatically
            prefixed by the table's UUID) and ``props`` should be a list of
            tuples with ``(attribute, value)``.

        Returns
        -------
        self : Styler

        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4))
        >>> df.style.set_table_styles(
        ...     [{'selector': 'tr:hover',
        ...       'props': [('background-color', 'yellow')]}]
        ... )
        """
        self.table_styles = table_styles
        return self
    def hide_index(self):
        """
        Hide any indices from rendering.

        .. versionadded:: 0.23.0

        Returns
        -------
        self : Styler
        """
        # consumed by _translate, which marks index cells invisible
        self.hidden_index = True
        return self
    def hide_columns(self, subset):
        """
        Hide columns from rendering.

        .. versionadded:: 0.23.0

        Parameters
        ----------
        subset : IndexSlice
            An argument to ``DataFrame.loc`` that identifies which columns
            are hidden.

        Returns
        -------
        self : Styler
        """
        subset = _non_reducing_slice(subset)
        hidden_df = self.data.loc[subset]
        # store positional (integer) locations; _translate checks membership
        self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
        return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color : str
Returns
-------
self : Styler
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
    def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                            subset=None, text_color_threshold=0.408):
        """
        Color the background in a gradient according to
        the data in each column (optionally row).

        Requires matplotlib.

        Parameters
        ----------
        cmap : str or colormap
            matplotlib colormap
        low, high : float
            compress the range by these values.
        axis : int or str
            1 or 'columns' for columnwise, 0 or 'index' for rowwise
        subset : IndexSlice
            a valid slice for ``data`` to limit the style application to
        text_color_threshold : float or int
            luminance threshold for determining text color. Facilitates text
            visibility across varying background colors. From 0 to 1.
            0 = all text is dark colored, 1 = all text is light colored.

            .. versionadded:: 0.24.0

        Returns
        -------
        self : Styler

        Raises
        ------
        ValueError
            If ``text_color_threshold`` is not a value from 0 to 1.

        Notes
        -----
        Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
        text legible by not using the entire range of the color map. The range
        of the data is extended by ``low * (x.max() - x.min())`` and ``high *
        (x.max() - x.min())`` before normalizing.
        """
        # non-numeric columns cannot be mapped through a colormap
        subset = _maybe_numeric_slice(self.data, subset)
        subset = _non_reducing_slice(subset)
        self.apply(self._background_gradient, cmap=cmap, subset=subset,
                   axis=axis, low=low, high=high,
                   text_color_threshold=text_color_threshold)
        return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.

    Returns a list (Series input) or DataFrame of CSS strings with a
    background color from ``cmap`` and a contrasting text color chosen
    by relative luminance.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)

    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            color : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            # BUGFIX: the exponent applies to the whole quotient,
            # i.e. ((x + 0.055) / 1.055) ** 2.4 per the W3C formula;
            # previously only 1.055 was raised to 2.4.
            r, g, b = (
                x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )

        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
def set_properties(self, subset=None, **kwargs):
    """
    Convenience method: attach the same static CSS property/value
    pairs to every cell (optionally restricted to ``subset``).

    Parameters
    ----------
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    kwargs : dict
        property: value pairs to be set for each cell

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    declarations = []
    for prop, val in kwargs.items():
        declarations.append('{p}: {v}'.format(p=prop, v=val))
    css = ';'.join(declarations)

    def styler_func(x):
        # Same CSS regardless of the cell value.
        return css

    return self.applymap(styler_func, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
    """
    Draw bar chart in dataframe cells.

    Returns a list (Series input) or DataFrame of CSS strings that draw
    each value as a linear-gradient "bar" spanning ``width`` percent of
    the cell at most.
    """
    # Get input value range.
    smin = s.min() if vmin is None else vmin
    if isinstance(smin, ABCSeries):
        smin = smin.min()
    smax = s.max() if vmax is None else vmax
    if isinstance(smax, ABCSeries):
        smax = smax.max()
    if align == 'mid':
        smin = min(0, smin)
        smax = max(0, smax)
    elif align == 'zero':
        # For "zero" mode, we want the range to be symmetrical around zero.
        smax = max(abs(smin), abs(smax))
        smin = -smax
    # Transform to percent-range of linear-gradient.
    # The 1e-12 guards against division by zero for constant data.
    normed = width * (s.values - smin) / (smax - smin + 1e-12)
    zero = -width * smin / (smax - smin + 1e-12)

    def css_bar(start, end, color):
        """
        Generate CSS code to draw a bar from start to end.
        """
        css = 'width: 10em; height: 80%;'
        if end > start:
            css += 'background: linear-gradient(90deg,'
            if start > 0:
                css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                    s=start, c=color
                )
            css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                e=min(end, width), c=color,
            )
        return css

    def css(x):
        # NaN cells get no bar at all.
        if pd.isna(x):
            return ''

        # avoid deprecated indexing `colors[x > zero]`
        color = colors[1] if x > zero else colors[0]

        if align == 'left':
            return css_bar(0, x, color)
        else:
            # 'zero'/'mid': bar grows away from the zero position.
            return css_bar(min(x, zero), max(x, zero), color)

    if s.ndim == 1:
        return [css(x) for x in normed]
    else:
        return pd.DataFrame(
            [[css(x) for x in row] for row in normed],
            index=s.index, columns=s.columns
        )
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar chart in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : int, str or None, default 0
        Apply to each column (`axis=0` or `'index'`), to each row
        (`axis=1` or `'columns'`), or to the entire DataFrame at once
        with `axis=None`.
    color : str or 2-tuple/list
        If a str is passed, the color is the same for both
        negative and positive numbers. If 2-tuple/list is used, the
        first element is the color_negative and the second is the
        color_positive (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 or 100. The largest value will cover `width`
        percent of the cell's width.
    align : {'left', 'zero', 'mid'}, default 'left'
        How to align the bars with the cells.

        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or
          if values are all negative (positive) the zero is aligned
          at the right (left) of the cell.

        .. versionadded:: 0.20.0

    vmin : float, optional
        Minimum bar value, defining the left hand limit
        of the bar drawing range, lower values are clipped to `vmin`.
        When None (default): the minimum value of the data will be used.

        .. versionadded:: 0.24.0

    vmax : float, optional
        Maximum bar value, defining the right hand limit
        of the bar drawing range, higher values are clipped to `vmax`.
        When None (default): the maximum value of the data will be used.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        # Message previously contained a stray space ("' mid'") -- fixed.
        raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")

    # Normalize ``color`` to a [color_negative, color_positive] pair.
    if not (is_list_like(color)):
        color = [color, color]
    elif len(color) == 1:
        color = [color[0], color[0]]
    elif len(color) > 2:
        raise ValueError("`color` must be string or a list-like"
                         " of length 2: [`color_neg`, `color_pos`]"
                         " (eg: color=['#d65f5f', '#5fba7d'])")

    subset = _maybe_numeric_slice(self.data, subset)
    subset = _non_reducing_slice(subset)
    self.apply(self._bar, subset=subset, axis=axis,
               align=align, colors=color, width=width,
               vmin=vmin, vmax=vmax)

    return self
def highlight_max(self, subset=None, color='yellow', axis=0):
    """
    Shade the background of the maximum value(s) with ``color``.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    # Shared handler does the slicing/queueing; max_=True selects maxima.
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """
    Shade the background of the minimum value(s) with ``color``.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    # Shared handler does the slicing/queueing; max_=False selects minima.
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
                       max_=True):
    # Normalize the slice in two explicit steps, then queue the
    # extrema-highlighting function.
    sliced = _maybe_numeric_slice(self.data, subset)
    sliced = _non_reducing_slice(sliced)
    self.apply(self._highlight_extrema, color=color, axis=axis,
               subset=sliced, max_=max_)
    return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .tee
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
    """
    Factory function for creating a subclass of ``Styler``
    with a custom template and Jinja environment.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates
    name : str
        Name of your custom template to use for rendering

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    # ChoiceLoader falls back to the default package templates when the
    # custom search path does not provide the requested template.
    loader = ChoiceLoader([
        FileSystemLoader(searchpath),
        cls.loader,
    ])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
def pipe(self, func, *args, **kwargs):
    """
    Apply ``func(self, *args, **kwargs)``, and return the result.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    func : function
        Function to apply to the Styler.  Alternatively, a
        ``(callable, keyword)`` tuple where ``keyword`` is a string
        naming the argument of ``callable`` that receives the Styler.
    *args, **kwargs :
        Arguments passed to `func`.

    Returns
    -------
    object :
        The value returned by ``func``.

    See Also
    --------
    DataFrame.pipe : Analogous method for DataFrame.
    Styler.apply : Apply a function row-wise, column-wise, or table-wise
        to modify the dataframe's styling.

    Notes
    -----
    ``pipe`` lets user-defined style "transformations" (functions that
    take a Styler plus other parameters and return a Styler) be chained
    fluently with the built-in Styler methods, instead of nesting calls:

    .. code-block:: python

        (df.style.set_precision(3)
           .pipe(g, arg1=a)
           .pipe(f, arg2=b, arg3=c))

    Examples
    --------
    >>> def format_conversion(styler):
    ...     return (styler.set_properties(**{'text-align': 'right'})
    ...                   .format({'conversion': '{:.1%}'}))

    >>> df = pd.DataFrame({'trial': list(range(5)),
    ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
    >>> (df.style
    ...    .highlight_min(subset=['conversion'], color='yellow')
    ...    .pipe(format_conversion)
    ...    .set_caption("Results with minimum conversion highlighted."))
    """
    # The shared pandas helper also handles the (callable, keyword)
    # tuple form of ``func``.
    result = com._pipe(self, func, *args, **kwargs)
    return result
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, inital_position): span
    """
    # ``sentinel`` marks positions that ``Index.format`` sparsified away
    # (repeats of the label above); a fresh object() cannot collide with
    # real labels.
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)

    if hidden_elements is None:
        hidden_elements = []

    lengths = {}
    if index.nlevels == 1:
        # Flat index: every visible position spans exactly one row.
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths

    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # Start of a new visible label: open a span of 1.
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # Continuation of the current label: widen its span.
                lengths[(i, last_label)] += 1

    # Fully hidden labels (span 0) are dropped from the result.
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}

    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """
    Coerce ``formatter`` into a callable: template strings are wrapped
    in ``str.format``, callables pass through, anything else raises.
    """
    if is_string_like(formatter):
        # e.g. "{:.2%}" -> lambda applying str.format per cell
        return lambda x: formatter.format(x)
    if callable(formatter):
        return formatter
    raise TypeError("Expected a template string or callable, got {formatter} "
                    "instead".format(formatter=formatter))
|
def _compute(self):
    """
    Execute the style functions built up in `self._todo`.

    Relies on the convention that all style functions go through
    ``.apply`` or ``.applymap``, which append the styles to apply as
    tuples of (application method, *args, **kwargs).
    """
    # Each entry's first element resolves the bound method on ``self``.
    r = self
    for func, args, kwargs in self._todo:
        r = func(self)(*args, **kwargs)
    return r
| 518
| 530
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    """Yield (pyplot, colors) or raise ImportError naming ``func``."""
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, colors
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
             caption=None, table_attributes=None, cell_ids=True):
    # ctx maps (row, col) positions -> accumulated CSS declarations;
    # _todo holds lazily-queued style functions (run by _compute).
    self.ctx = defaultdict(list)
    self._todo = []

    if not isinstance(data, (pd.Series, pd.DataFrame)):
        raise TypeError("``data`` must be a Series or DataFrame")
    if data.ndim == 1:
        # A Series is promoted to a one-column frame internally.
        data = data.to_frame()
    if not data.index.is_unique or not data.columns.is_unique:
        raise ValueError("style is not supported for non-unique indices.")

    self.data = data
    self.index = data.index
    self.columns = data.columns

    self.uuid = uuid
    self.table_styles = table_styles
    self.caption = caption
    if precision is None:
        # Fall back to the global display option.
        precision = get_option('display.precision')
    self.precision = precision
    self.table_attributes = table_attributes
    self.hidden_index = False
    self.hidden_columns = []
    self.cell_ids = cell_ids

    # display_funcs maps (row, col) -> formatting function
    def default_display_func(x):
        # Reads self.precision lazily, so set_precision affects it.
        if is_float(x):
            return '{:>.{precision}g}'.format(x, precision=self.precision)
        else:
            return x

    self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
    axes='index, columns', klass='Styler',
    axes_single_arg="{0 or 'index', 1 or 'columns'}",
    optional_by="""
        by : str or list of str
            Name or list of names which refer to the axis items.""",
    versionadded_to_excel='\n    .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
             float_format=None, columns=None, header=True, index=True,
             index_label=None, startrow=0, startcol=0, engine=None,
             merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
             freeze_panes=None):
    # Delegates to ExcelFormatter, which understands Styler objects and
    # converts the computed CSS into Excel cell formats.
    from pandas.io.formats.excel import ExcelFormatter
    formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                               header=header,
                               float_format=float_format, index=index,
                               index_label=index_label,
                               merge_cells=merge_cells,
                               inf_rep=inf_rep)
    formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                    startcol=startcol, freeze_panes=freeze_panes,
                    engine=engine)
def _translate(self):
    """
    Convert the DataFrame in `self.data` and the attrs from `_build_styles`
    into a dictionary of {head, body, uuid, cellstyle}.

    The resulting dict is the context handed to the Jinja HTML template.
    """
    table_styles = self.table_styles or []
    caption = self.caption
    ctx = self.ctx
    precision = self.precision
    hidden_index = self.hidden_index
    hidden_columns = self.hidden_columns
    # A random uuid keeps CSS selectors from colliding between tables.
    uuid = self.uuid or str(uuid1()).replace("-", "_")
    ROW_HEADING_CLASS = "row_heading"
    COL_HEADING_CLASS = "col_heading"
    INDEX_NAME_CLASS = "index_name"

    DATA_CLASS = "data"
    BLANK_CLASS = "blank"
    BLANK_VALUE = ""

    def format_attr(pair):
        return "{key}={value}".format(**pair)

    # for sparsifying a MultiIndex
    idx_lengths = _get_level_lengths(self.index)
    col_lengths = _get_level_lengths(self.columns, hidden_columns)

    cell_context = dict()

    n_rlvls = self.data.index.nlevels
    n_clvls = self.data.columns.nlevels
    rlabels = self.data.index.tolist()
    clabels = self.data.columns.tolist()

    # Normalize flat indexes to the one-level-per-label list shape used
    # by the MultiIndex code path below.
    if n_rlvls == 1:
        rlabels = [[x] for x in rlabels]
    if n_clvls == 1:
        clabels = [[x] for x in clabels]
    clabels = list(zip(*clabels))

    cellstyle = []
    head = []

    # --- column header rows, one per column level -----------------------
    for r in range(n_clvls):
        # Blank for Index columns...
        row_es = [{"type": "th",
                   "value": BLANK_VALUE,
                   "display_value": BLANK_VALUE,
                   "is_visible": not hidden_index,
                   "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)

        # ... except maybe the last for columns.names
        name = self.data.columns.names[r]
        cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
              "level{lvl}".format(lvl=r)]
        name = BLANK_VALUE if name is None else name
        row_es.append({"type": "th",
                       "value": name,
                       "display_value": name,
                       "class": " ".join(cs),
                       "is_visible": not hidden_index})

        if clabels:
            for c, value in enumerate(clabels[r]):
                cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get(
                    "col_headings", {}).get(r, {}).get(c, []))
                es = {
                    "type": "th",
                    "value": value,
                    "display_value": value,
                    "class": " ".join(cs),
                    "is_visible": _is_visible(c, r, col_lengths),
                }
                colspan = col_lengths.get((r, c), 0)
                if colspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "colspan", "value": colspan})
                    ]
                row_es.append(es)
            head.append(row_es)

    # --- optional extra header row for named index levels ---------------
    if (self.data.index.names and
            com._any_not_none(*self.data.index.names) and
            not hidden_index):
        index_header_row = []

        for c, name in enumerate(self.data.index.names):
            cs = [INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=c)]
            name = '' if name is None else name
            index_header_row.append({"type": "th", "value": name,
                                     "class": " ".join(cs)})

        index_header_row.extend(
            [{"type": "th",
              "value": BLANK_VALUE,
              "class": " ".join([BLANK_CLASS])
              }] * (len(clabels[0]) - len(hidden_columns)))

        head.append(index_header_row)

    # --- data rows: index label cells then data cells -------------------
    body = []
    for r, idx in enumerate(self.data.index):
        row_es = []
        for c, value in enumerate(rlabels[r]):
            rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                   "row{row}".format(row=r)]
            es = {
                "type": "th",
                "is_visible": (_is_visible(r, c, idx_lengths) and
                               not hidden_index),
                "value": value,
                "display_value": value,
                "id": "_".join(rid[1:]),
                "class": " ".join(rid)
            }
            rowspan = idx_lengths.get((c, r), 0)
            if rowspan > 1:
                es["attributes"] = [
                    format_attr({"key": "rowspan", "value": rowspan})
                ]
            row_es.append(es)

        for c, col in enumerate(self.data.columns):
            cs = [DATA_CLASS, "row{row}".format(row=r),
                  "col{col}".format(col=c)]
            cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
            formatter = self._display_funcs[(r, c)]
            value = self.data.iloc[r, c]
            row_dict = {"type": "td",
                        "value": value,
                        "class": " ".join(cs),
                        "display_value": formatter(value),
                        "is_visible": (c not in hidden_columns)}
            # only add an id if the cell has a style
            if (self.cell_ids or
                    not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                row_dict["id"] = "_".join(cs[1:])
            row_es.append(row_dict)
            props = []
            for x in ctx[r, c]:
                # have to handle empty styles like ['']
                if x.count(":"):
                    props.append(x.split(":"))
                else:
                    props.append(['', ''])
            cellstyle.append({'props': props,
                              'selector': "row{row}_col{col}"
                              .format(row=r, col=c)})
        body.append(row_es)

    table_attr = self.table_attributes
    # Opt the table out of MathJax processing when the option is off.
    use_mathjax = get_option("display.html.use_mathjax")
    if not use_mathjax:
        table_attr = table_attr or ''
        if 'class="' in table_attr:
            table_attr = table_attr.replace('class="',
                                            'class="tex2jax_ignore ')
        else:
            table_attr += ' class="tex2jax_ignore"'

    return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                precision=precision, table_styles=table_styles,
                caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
    """
    Format the text display value of cells.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    formatter : str, callable, or dict
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that restricts which elements
        ``formatter`` is applied to.

    Returns
    -------
    self : Styler

    Notes
    -----
    ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
    ``a`` is one of

    - str: this will be wrapped in: ``a.format(x)``
    - callable: called with the value of an individual cell

    The default display value for numeric values is the "general" (``g``)
    format with ``pd.options.display.precision`` precision.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    >>> df.style.format("{:.2%}")
    >>> df['c'] = ['a', 'b', 'c', 'd']
    >>> df.style.format({'c': str.upper})
    """
    if subset is None:
        row_locs = range(len(self.data))
        col_locs = range(len(self.data.columns))
    else:
        subset = _non_reducing_slice(subset)
        if len(subset) == 1:
            # A 1-tuple slice means "all columns" for the selected rows.
            subset = subset, self.data.columns

        sub_df = self.data.loc[subset]
        row_locs = self.data.index.get_indexer_for(sub_df.index)
        col_locs = self.data.columns.get_indexer_for(sub_df.columns)

    if is_dict_like(formatter):
        for col, col_formatter in formatter.items():
            # formatter must be callable, so '{}' are converted to lambdas
            col_formatter = _maybe_wrap_formatter(col_formatter)
            col_num = self.data.columns.get_indexer_for([col])[0]

            for row_num in row_locs:
                self._display_funcs[(row_num, col_num)] = col_formatter
    else:
        # single scalar to format all cells with; wrap it ONCE outside
        # the loop (it was previously re-wrapped per cell -- a
        # loop-invariant call with identical results).
        formatter = _maybe_wrap_formatter(formatter)
        locs = product(*(row_locs, col_locs))
        for i, j in locs:
            self._display_funcs[(i, j)] = formatter

    return self
def render(self, **kwargs):
    """
    Render the built up styles to HTML.

    Parameters
    ----------
    `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to
        provide additional variables for a custom template.

        .. versionadded:: 0.20

    Returns
    -------
    rendered : str
        The rendered HTML

    Notes
    -----
    ``Styler`` objects have defined the ``_repr_html_`` method
    which automatically calls ``self.render()`` when it's the
    last item in a Notebook cell. When calling ``Styler.render()``
    directly, wrap the result in ``IPython.display.HTML`` to view
    the rendered HTML in the notebook.

    Pandas uses the following keys in render. Arguments passed
    in ``**kwargs`` take precedence, so think carefully if you want
    to override them:

    * head
    * cellstyle
    * body
    * uuid
    * precision
    * table_styles
    * caption
    * table_attributes
    """
    # Run all queued style functions first so ctx is populated.
    self._compute()
    # TODO: namespace all the pandas keys
    d = self._translate()
    # filter out empty styles, every cell will have a class
    # but the list of props may just be [['', '']].
    # so we have the nested anys below
    trimmed = [x for x in d['cellstyle']
               if any(any(y) for y in x['props'])]
    d['cellstyle'] = trimmed
    d.update(kwargs)
    return self.template.render(**d)
def _update_ctx(self, attrs):
    """
    Update the state of the Styler.

    Collects a mapping of {index_label: ['<property>: <value>']}.

    attrs : Series or DataFrame
        should contain strings of '<property>: <value>;<prop2>: <val2>'
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
    """
    for row_label, row in attrs.iterrows():
        for col_label, css in row.iteritems():
            # Translate labels into the integer positions used as keys
            # into ``self.ctx``.
            i = self.index.get_indexer([row_label])[0]
            j = self.columns.get_indexer([col_label])[0]
            for declaration in css.rstrip(";").split(";"):
                self.ctx[(i, j)].append(declaration)
def _copy(self, deepcopy=False):
    # Build a fresh Styler over the same data; accumulated styles are
    # shared (shallow) or duplicated (deep) depending on ``deepcopy``.
    clone = Styler(self.data, precision=self.precision,
                   caption=self.caption, uuid=self.uuid,
                   table_styles=self.table_styles)
    if deepcopy:
        clone.ctx = copy.deepcopy(self.ctx)
        clone._todo = copy.deepcopy(self._todo)
    else:
        clone.ctx = self.ctx
        clone._todo = self._todo
    return clone
def __copy__(self):
    """
    Shallow copy: the new Styler shares ``ctx`` and ``_todo`` with this
    one (the previous docstring incorrectly said "Deep copy").
    """
    return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
    # Deep-copies the accumulated styles so the copies are independent.
    return self._copy(deepcopy=True)
def clear(self):
    """
    Reset the styler, removing any previously applied styles.

    Returns None.
    """
    # Drop both the queued style functions and the computed CSS.
    self._todo = []
    self.ctx.clear()
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .applymap. The append styles to apply as tuples of
(application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
    # Immediate worker behind ``Styler.apply``: runs ``func`` over the
    # (optionally sliced) data, validates the result's shape, and folds
    # the returned CSS strings into ``self.ctx``.
    subset = slice(None) if subset is None else subset
    subset = _non_reducing_slice(subset)
    data = self.data.loc[subset]
    if axis is not None:
        result = data.apply(func, axis=axis,
                            result_type='expand', **kwargs)
        result.columns = data.columns
    else:
        # axis=None: ``func`` sees the whole (sliced) frame and must
        # return a DataFrame with identical index and columns.
        result = func(data, **kwargs)
        if not isinstance(result, pd.DataFrame):
            raise TypeError(
                "Function {func!r} must return a DataFrame when "
                "passed to `Styler.apply` with axis=None"
                .format(func=func))
        if not (result.index.equals(data.index) and
                result.columns.equals(data.columns)):
            msg = ('Result of {func!r} must have identical index and '
                   'columns as the input'.format(func=func))
            raise ValueError(msg)

    result_shape = result.shape
    expected_shape = self.data.loc[subset].shape
    if result_shape != expected_shape:
        msg = ("Function {func!r} returned the wrong shape.\n"
               "Result has shape: {res}\n"
               "Expected shape: {expect}".format(func=func,
                                                 res=result.shape,
                                                 expect=expected_shape))
        raise ValueError(msg)
    self._update_ctx(result)
    return self
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a function column-wise, row-wise, or table-wise, updating the
    HTML representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a Series or DataFrame (depending on
        ``axis``) and return an object of the same shape; with
        ``axis=None`` it must return a DataFrame with identical index
        and column labels.
    axis : int, str or None
        apply to each column (``axis=0`` or ``'index'``), to each row
        (``axis=1`` or ``'columns'``), or to the entire DataFrame at
        once with ``axis=None``
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    Notes
    -----
    The output shape of ``func`` should match the input, i.e. if ``x``
    is the input row, column, or table (depending on ``axis``), then
    ``func(x).shape == x.shape`` should be true.  This is similar to
    ``DataFrame.apply``, except that ``axis=None`` applies the function
    to the entire DataFrame at once rather than column- or row-wise.

    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
                    for v in x]
    ...
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    # Styles are computed lazily: record a getter for the bound
    # ``_apply`` plus its arguments; ``_compute`` replays them at
    # render time.
    task = (lambda instance: getattr(instance, '_apply'),
            (func, axis, subset), kwargs)
    self._todo.append(task)
    return self
def _applymap(self, func, subset=None, **kwargs):
    # Immediate worker behind ``applymap``: bind kwargs up front, then
    # run the function over every cell of the (sliced) data.
    func = partial(func, **kwargs)  # applymap doesn't take kwargs?
    if subset is None:
        subset = pd.IndexSlice[:]
    sliced = _non_reducing_slice(subset)
    result = self.data.loc[sliced].applymap(func)
    self._update_ctx(result)
    return self
def applymap(self, func, subset=None, **kwargs):
    """
    Apply ``func`` elementwise, updating the HTML representation with
    the result.

    Parameters
    ----------
    func : function
        ``func`` should take a scalar and return a scalar
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    # Lazily queued; executed by ``_compute`` at render time.
    task = (lambda instance: getattr(instance, '_applymap'),
            (func, subset), kwargs)
    self._todo.append(task)
    return self
def where(self, cond, value, other=None, subset=None, **kwargs):
    """
    Apply a style elementwise, choosing between two CSS strings based
    on the boolean result of ``cond`` for each cell.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    cond : callable
        ``cond`` should take a scalar and return a boolean
    value : str
        applied when ``cond`` returns true
    other : str
        applied when ``cond`` returns false (defaults to no styling)
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``cond``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap
    """
    fallback = '' if other is None else other

    def selector(val):
        return value if cond(val) else fallback

    return self.applymap(selector, subset=subset, **kwargs)
def set_precision(self, precision):
    """
    Set the precision used to render floats.

    Parameters
    ----------
    precision : int

    Returns
    -------
    self : Styler
    """
    self.precision = precision
    return self
def set_table_attributes(self, attributes):
    """
    Set extra attributes for the opening ``<table>`` tag; these appear
    in addition to the automatic (by default) id.

    Parameters
    ----------
    attributes : string

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_attributes('class="pure-table"')
    # ... <table class="pure-table"> ...
    """
    self.table_attributes = attributes
    return self
def export(self):
    """
    Export the queued style functions of the current Styler.

    The returned list can be replayed on another Styler via
    ``Styler.use``.

    Returns
    -------
    styles : list

    See Also
    --------
    Styler.use
    """
    return self._todo
def use(self, styles):
    """
    Append styles (e.g. obtained from ``Styler.export``) onto the
    current Styler.

    Parameters
    ----------
    styles : list
        list of style functions

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.export
    """
    self._todo.extend(styles)
    return self
def set_uuid(self, uuid):
    """
    Set the uuid used to namespace this Styler's CSS selectors.

    Parameters
    ----------
    uuid : str

    Returns
    -------
    self : Styler
    """
    self.uuid = uuid
    return self
def set_caption(self, caption):
    """
    Set the caption attached to the rendered table.

    Parameters
    ----------
    caption : str

    Returns
    -------
    self : Styler
    """
    self.caption = caption
    return self
def set_table_styles(self, table_styles):
    """
    Set the table styles on a Styler; these are emitted in a ``<style>``
    tag before the generated HTML table.

    Parameters
    ----------
    table_styles : list
        Each individual table_style should be a dictionary with
        ``selector`` and ``props`` keys. ``selector`` should be a CSS
        selector that the style will be applied to (automatically
        prefixed by the table's UUID) and ``props`` should be a list of
        tuples with ``(attribute, value)``.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_styles(
    ...     [{'selector': 'tr:hover',
    ...       'props': [('background-color', 'yellow')]}]
    ... )
    """
    self.table_styles = table_styles
    return self
def hide_index(self):
    """
    Hide the index from rendering.

    .. versionadded:: 0.23.0

    Returns
    -------
    self : Styler
    """
    self.hidden_index = True
    return self
def hide_columns(self, subset):
    """
    Hide the selected columns from rendering.

    .. versionadded:: 0.23.0

    Parameters
    ----------
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that identifies which columns
        are hidden.

    Returns
    -------
    self : Styler
    """
    normalized = _non_reducing_slice(subset)
    hidden = self.data.loc[normalized]
    # Stored as integer positions, consumed by ``_translate``.
    self.hidden_columns = self.columns.get_indexer_for(hidden.columns)
    return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
    """
    Shade the background of missing values with ``null_color``.

    Parameters
    ----------
    null_color : str

    Returns
    -------
    self : Styler
    """
    # applymap returns self, so the fluent return can be direct.
    return self.applymap(self._highlight_null, null_color=null_color)
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                        subset=None, text_color_threshold=0.408):
    """
    Shade cell backgrounds along a colormap driven by the data in each
    column (or row). Requires matplotlib.

    Parameters
    ----------
    cmap : str or colormap
        matplotlib colormap.
    low, high : float
        Compress the color range by extending the data range by
        ``low * (max - min)`` below and ``high * (max - min)`` above
        before normalizing.
    axis : int or str
        1 or 'columns' for columnwise, 0 or 'index' for rowwise.
    subset : IndexSlice
        A valid slice for ``data`` limiting where styles apply.
    text_color_threshold : float or int
        Luminance cutoff (0 to 1) choosing dark vs. light text so the
        value stays legible on any background.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not between 0 and 1.

    Notes
    -----
    Tune ``text_color_threshold`` or ``low``/``high`` to keep text
    readable by avoiding the extremes of the colormap.
    """
    sliced = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._background_gradient, cmap=cmap, subset=sliced,
               axis=axis, low=low, high=high,
               text_color_threshold=text_color_threshold)
    return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.

    Returns per-cell CSS strings; a list for Series input, a DataFrame
    of the same shape for DataFrame input.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)

    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            rgba : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1.
            """
            # BUG FIX: the exponent applies to the whole quotient,
            # ((x + 0.055) / 1.055) ** 2.4 per the W3C formula; the
            # previous code exponentiated only 1.055 due to operator
            # precedence, skewing the dark/light text decision.
            r, g, b = (
                x / 12.92 if x <= 0.03928
                else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            # Pick light text on dark backgrounds and vice versa.
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )

        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
def set_properties(self, subset=None, **kwargs):
    """
    Apply fixed (data-independent) CSS properties to every cell.

    Parameters
    ----------
    subset : IndexSlice
        A valid slice for ``data`` limiting where the styles apply.
    kwargs : dict
        ``property: value`` pairs set on each cell.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    # Build the CSS string once; every cell gets the identical value.
    css = ';'.join('{p}: {v}'.format(p=p, v=v)
                   for p, v in kwargs.items())
    return self.applymap(lambda x: css, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
    """
    Draw bar chart in dataframe cells.

    Returns a list of CSS strings for Series input, or a DataFrame of
    CSS strings matching ``s``'s shape for DataFrame input.
    """
    # Get input value range.
    smin = s.min() if vmin is None else vmin
    if isinstance(smin, ABCSeries):
        # axis=None gives a Series of per-column minima; reduce again.
        smin = smin.min()
    smax = s.max() if vmax is None else vmax
    if isinstance(smax, ABCSeries):
        smax = smax.max()
    if align == 'mid':
        # 'mid' anchors the range so that zero is always included.
        smin = min(0, smin)
        smax = max(0, smax)
    elif align == 'zero':
        # For "zero" mode, we want the range to be symmetrical around zero.
        smax = max(abs(smin), abs(smax))
        smin = -smax
    # Transform to percent-range of linear-gradient; the 1e-12 guards
    # against division by zero when all values are equal.
    normed = width * (s.values - smin) / (smax - smin + 1e-12)
    zero = -width * smin / (smax - smin + 1e-12)

    def css_bar(start, end, color):
        """
        Generate CSS code to draw a bar from start to end.
        """
        css = 'width: 10em; height: 80%;'
        if end > start:
            css += 'background: linear-gradient(90deg,'
            if start > 0:
                css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                    s=start, c=color
                )
            css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                e=min(end, width), c=color,
            )
        return css

    def css(x):
        if pd.isna(x):
            return ''
        # avoid deprecated indexing `colors[x > zero]`
        color = colors[1] if x > zero else colors[0]
        if align == 'left':
            return css_bar(0, x, color)
        else:
            # bar spans from the zero anchor toward the value.
            return css_bar(min(x, zero), max(x, zero), color)
    if s.ndim == 1:
        return [css(x) for x in normed]
    else:
        return pd.DataFrame(
            [[css(x) for x in row] for row in normed],
            index=s.index, columns=s.columns
        )
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar charts in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : int, str or None, default 0
        Apply per column (``0``/``'index'``), per row
        (``1``/``'columns'``), or over the whole frame (``None``).
    color : str or 2-tuple/list
        A single color for all values, or
        ``[color_negative, color_positive]``
        (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 and 100; the largest value covers `width`
        percent of the cell's width.
    align : {'left', 'zero',' mid'}, default 'left'
        How to align the bars with the cells.

        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or
          if values are all negative (positive) the zero is aligned
          at the right (left) of the cell.

        .. versionadded:: 0.20.0

    vmin : float, optional
        Left-hand clip limit of the bar range; defaults to the data min.

        .. versionadded:: 0.24.0

    vmax : float, optional
        Right-hand clip limit of the bar range; defaults to the data max.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero',' mid'}")
    # Normalize `color` into a [negative, positive] pair.
    if is_list_like(color):
        if len(color) == 1:
            color = [color[0], color[0]]
        elif len(color) > 2:
            raise ValueError("`color` must be string or a list-like"
                             " of length 2: [`color_neg`, `color_pos`]"
                             " (eg: color=['#d65f5f', '#5fba7d'])")
    else:
        color = [color, color]
    sliced = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._bar, subset=sliced, axis=axis,
               align=align, colors=color, width=width,
               vmin=vmin, vmax=vmax)
    return self
def highlight_max(self, subset=None, color='yellow', axis=0):
    """
    Shade the background of the maximum value(s).

    Parameters
    ----------
    subset : IndexSlice, default None
        A valid slice for ``data`` limiting where the style applies.
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0/'index' for columnwise, 1/'columns' for rowwise, ``None``
        for tablewise.

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color,
                                   axis=axis, max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """
    Shade the background of the minimum value(s).

    Parameters
    ----------
    subset : IndexSlice, default None
        A valid slice for ``data`` limiting where the style applies.
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0/'index' for columnwise, 1/'columns' for rowwise, ``None``
        for tablewise.

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color,
                                   axis=axis, max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
                       max_=True):
    # Shared driver for highlight_max/highlight_min: restrict to the
    # numeric slice, then queue the extrema styler.
    numeric = _maybe_numeric_slice(self.data, subset)
    self.apply(self._highlight_extrema, color=color, axis=axis,
               subset=_non_reducing_slice(numeric), max_=max_)
    return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .tee
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
    """
    Build a ``Styler`` subclass wired to a custom Jinja template.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates.
    name : str
        Name of the custom template used for rendering.

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    # Custom directories are consulted first, falling back to the
    # package's bundled templates.
    loader = ChoiceLoader([FileSystemLoader(searchpath), cls.loader])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
def pipe(self, func, *args, **kwargs):
    """
    Apply ``func(self, *args, **kwargs)``, and return the result.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    func : function
        Function to apply to the Styler. Alternatively, a
        ``(callable, keyword)`` tuple where ``keyword`` is a string
        indicating the keyword of ``callable`` that expects the Styler.
    *args, **kwargs :
        Arguments passed to `func`.

    Returns
    -------
    object :
        The value returned by ``func``.

    See Also
    --------
    DataFrame.pipe : Analogous method for DataFrame.
    Styler.apply : Apply a function row-wise, column-wise, or table-wise
        to modify the dataframe's styling.

    Notes
    -----
    ``.pipe`` turns nested calls like
    ``f(g(df.style.set_precision(3), arg1=a), arg2=b)`` into a readable
    chain::

        (df.style.set_precision(3)
           .pipe(g, arg1=a)
           .pipe(f, arg2=b))

    allowing user-defined style "transformations" to be interleaved
    with built-in Styler methods.

    Examples
    --------
    >>> def format_conversion(styler):
    ...     return (styler.set_properties(**{'text-align': 'right'})
    ...                   .format({'conversion': '{:.1%}'}))

    >>> df = pd.DataFrame({'trial': list(range(5)),
    ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
    >>> (df.style
    ...    .highlight_min(subset=['conversion'], color='yellow')
    ...    .pipe(format_conversion)
    ...    .set_caption("Results with minimum conversion highlighted."))
    """
    return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, initial_position): span
    """
    # sentinel marks positions that index.format() sparsified away
    # (i.e. repeats absorbed into the label above them).
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)
    if hidden_elements is None:
        hidden_elements = []
    lengths = {}
    if index.nlevels == 1:
        # Flat index: every visible label spans exactly one cell.
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths
    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                # Sparsification disabled: no spans, one cell per label.
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # Start of a new visible span.
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # Sparsified repeat of a visible label: extend its span.
                lengths[(i, last_label)] += 1
    # Drop spans that ended up fully hidden (length 0).
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}
    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """
    Normalize *formatter* into a callable.

    Template strings are wrapped so they are applied via
    ``str.format``; callables pass through untouched; anything else
    raises ``TypeError``.
    """
    if is_string_like(formatter):
        return lambda x: formatter.format(x)
    if callable(formatter):
        return formatter
    msg = ("Expected a template string or callable, got {formatter} "
           "instead".format(formatter=formatter))
    raise TypeError(msg)
|
set_properties
|
Convenience method for setting one or more non-data dependent
properties or each cell.
Parameters
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs : dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
# jinja2 renders the HTML templates and is a hard requirement for Styler.
try:
    from jinja2 import (
        PackageLoader, Environment, ChoiceLoader, FileSystemLoader
    )
except ImportError:
    raise ImportError("pandas.Styler requires jinja2. "
                      "Please install with `conda install Jinja2`\n"
                      "or `pip install Jinja2`")
# matplotlib is optional: it only powers the colormap-based styles, so a
# missing install is recorded here and surfaced lazily by ``_mpl``.
try:
    import matplotlib.pyplot as plt
    from matplotlib import colors
    has_mpl = True
except ImportError:
    has_mpl = False
    no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    # Hand back the lazily-imported matplotlib handles, or fail loudly
    # with a message naming the Styler method that needs them.
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, colors
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``evel<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
# Jinja2 machinery shared by every Styler instance; the default HTML
# template ships with pandas under io/formats/templates.
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
    loader=loader,
    trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
             caption=None, table_attributes=None, cell_ids=True):
    """
    Validate ``data`` and initialize all rendering state.

    Raises TypeError for non-Series/DataFrame input and ValueError for
    non-unique indices or columns.
    """
    # ctx maps (row, col) -> list of "prop: value" CSS strings.
    self.ctx = defaultdict(list)
    # Queue of pending style functions, executed lazily by _compute().
    self._todo = []
    if not isinstance(data, (pd.Series, pd.DataFrame)):
        raise TypeError("``data`` must be a Series or DataFrame")
    if data.ndim == 1:
        # Normalize Series input to a one-column frame.
        data = data.to_frame()
    if not data.index.is_unique or not data.columns.is_unique:
        raise ValueError("style is not supported for non-unique indices.")
    self.data = data
    self.index = data.index
    self.columns = data.columns
    self.uuid = uuid
    self.table_styles = table_styles
    self.caption = caption
    if precision is None:
        precision = get_option('display.precision')
    self.precision = precision
    self.table_attributes = table_attributes
    self.hidden_index = False
    self.hidden_columns = []
    self.cell_ids = cell_ids

    # display_funcs maps (row, col) -> formatting function
    def default_display_func(x):
        if is_float(x):
            return '{:>.{precision}g}'.format(x, precision=self.precision)
        else:
            return x
    self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
    # Jupyter's rich-display hook: show the styled table as HTML.
    return self.render()
@Appender(_shared_docs['to_excel'] % dict(
    axes='index, columns', klass='Styler',
    axes_single_arg="{0 or 'index', 1 or 'columns'}",
    optional_by="""
        by : str or list of str
            Name or list of names which refer to the axis items.""",
    versionadded_to_excel='\n    .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
             float_format=None, columns=None, header=True, index=True,
             index_label=None, startrow=0, startcol=0, engine=None,
             merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
             freeze_panes=None):
    # Delegate to ExcelFormatter, which understands Styler objects and
    # carries the computed styles over into the workbook.
    from pandas.io.formats.excel import ExcelFormatter
    formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                               header=header,
                               float_format=float_format, index=index,
                               index_label=index_label,
                               merge_cells=merge_cells,
                               inf_rep=inf_rep)
    formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                    startcol=startcol, freeze_panes=freeze_panes,
                    engine=engine)
def _translate(self):
    """
    Convert the DataFrame in `self.data` and the attrs from `_build_styles`
    into a dictionary of {head, body, uuid, cellstyle}.
    """
    table_styles = self.table_styles or []
    caption = self.caption
    ctx = self.ctx
    precision = self.precision
    hidden_index = self.hidden_index
    hidden_columns = self.hidden_columns
    uuid = self.uuid or str(uuid1()).replace("-", "_")
    # CSS class names used throughout the generated HTML.
    ROW_HEADING_CLASS = "row_heading"
    COL_HEADING_CLASS = "col_heading"
    INDEX_NAME_CLASS = "index_name"
    DATA_CLASS = "data"
    BLANK_CLASS = "blank"
    BLANK_VALUE = ""

    def format_attr(pair):
        # {"key": k, "value": v} -> 'k=v' for HTML attributes.
        return "{key}={value}".format(**pair)

    # for sparsifying a MultiIndex
    idx_lengths = _get_level_lengths(self.index)
    col_lengths = _get_level_lengths(self.columns, hidden_columns)
    cell_context = dict()
    n_rlvls = self.data.index.nlevels
    n_clvls = self.data.columns.nlevels
    rlabels = self.data.index.tolist()
    clabels = self.data.columns.tolist()
    # Normalize flat indexes to the nested list-of-lists layout used
    # for MultiIndexes so the loops below can be uniform.
    if n_rlvls == 1:
        rlabels = [[x] for x in rlabels]
    if n_clvls == 1:
        clabels = [[x] for x in clabels]
    clabels = list(zip(*clabels))
    cellstyle = []
    head = []
    for r in range(n_clvls):
        # Blank for Index columns...
        row_es = [{"type": "th",
                   "value": BLANK_VALUE,
                   "display_value": BLANK_VALUE,
                   "is_visible": not hidden_index,
                   "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
        # ... except maybe the last for columns.names
        name = self.data.columns.names[r]
        cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
              "level{lvl}".format(lvl=r)]
        name = BLANK_VALUE if name is None else name
        row_es.append({"type": "th",
                       "value": name,
                       "display_value": name,
                       "class": " ".join(cs),
                       "is_visible": not hidden_index})
        if clabels:
            for c, value in enumerate(clabels[r]):
                cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get(
                    "col_headings", {}).get(r, {}).get(c, []))
                es = {
                    "type": "th",
                    "value": value,
                    "display_value": value,
                    "class": " ".join(cs),
                    "is_visible": _is_visible(c, r, col_lengths),
                }
                colspan = col_lengths.get((r, c), 0)
                if colspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "colspan", "value": colspan})
                    ]
                row_es.append(es)
            head.append(row_es)
    # Extra header row for index names, when any are set and shown.
    if (self.data.index.names and
            com._any_not_none(*self.data.index.names) and
            not hidden_index):
        index_header_row = []
        for c, name in enumerate(self.data.index.names):
            cs = [INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=c)]
            name = '' if name is None else name
            index_header_row.append({"type": "th", "value": name,
                                     "class": " ".join(cs)})
        index_header_row.extend(
            [{"type": "th",
              "value": BLANK_VALUE,
              "class": " ".join([BLANK_CLASS])
              }] * (len(clabels[0]) - len(hidden_columns)))
        head.append(index_header_row)
    body = []
    for r, idx in enumerate(self.data.index):
        row_es = []
        # Row-heading <th> cells (one per index level).
        for c, value in enumerate(rlabels[r]):
            rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                   "row{row}".format(row=r)]
            es = {
                "type": "th",
                "is_visible": (_is_visible(r, c, idx_lengths) and
                               not hidden_index),
                "value": value,
                "display_value": value,
                "id": "_".join(rid[1:]),
                "class": " ".join(rid)
            }
            rowspan = idx_lengths.get((c, r), 0)
            if rowspan > 1:
                es["attributes"] = [
                    format_attr({"key": "rowspan", "value": rowspan})
                ]
            row_es.append(es)
        # Data <td> cells.
        for c, col in enumerate(self.data.columns):
            cs = [DATA_CLASS, "row{row}".format(row=r),
                  "col{col}".format(col=c)]
            cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
            formatter = self._display_funcs[(r, c)]
            value = self.data.iloc[r, c]
            row_dict = {"type": "td",
                        "value": value,
                        "class": " ".join(cs),
                        "display_value": formatter(value),
                        "is_visible": (c not in hidden_columns)}
            # only add an id if the cell has a style
            if (self.cell_ids or
                    not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                row_dict["id"] = "_".join(cs[1:])
            row_es.append(row_dict)
            props = []
            for x in ctx[r, c]:
                # have to handle empty styles like ['']
                if x.count(":"):
                    props.append(x.split(":"))
                else:
                    props.append(['', ''])
            cellstyle.append({'props': props,
                              'selector': "row{row}_col{col}"
                              .format(row=r, col=c)})
        body.append(row_es)
    table_attr = self.table_attributes
    # Keep MathJax from mangling cell contents when it's disabled.
    use_mathjax = get_option("display.html.use_mathjax")
    if not use_mathjax:
        table_attr = table_attr or ''
        if 'class="' in table_attr:
            table_attr = table_attr.replace('class="',
                                            'class="tex2jax_ignore ')
        else:
            table_attr += ' class="tex2jax_ignore"'
    return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                precision=precision, table_styles=table_styles,
                caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
    """
    Format the text display value of cells.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    formatter : str, callable, or dict
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that restricts which elements
        ``formatter`` is applied to.

    Returns
    -------
    self : Styler

    Notes
    -----
    ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
    ``a`` is one of

    - str: this will be wrapped in: ``a.format(x)``
    - callable: called with the value of an individual cell

    The default display value for numeric values is the "general" (``g``)
    format with ``pd.options.display.precision`` precision.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    >>> df.style.format("{:.2%}")
    >>> df['c'] = ['a', 'b', 'c', 'd']
    >>> df.style.format({'c': str.upper})
    """
    if subset is None:
        row_locs = range(len(self.data))
        col_locs = range(len(self.data.columns))
    else:
        subset = _non_reducing_slice(subset)
        if len(subset) == 1:
            # A 1-tuple slice selects rows only: format every column.
            subset = subset, self.data.columns
        sub_df = self.data.loc[subset]
        row_locs = self.data.index.get_indexer_for(sub_df.index)
        col_locs = self.data.columns.get_indexer_for(sub_df.columns)
    if is_dict_like(formatter):
        for col, col_formatter in formatter.items():
            # formatter must be callable, so '{}' are converted to lambdas
            col_formatter = _maybe_wrap_formatter(col_formatter)
            col_num = self.data.columns.get_indexer_for([col])[0]
            for row_num in row_locs:
                self._display_funcs[(row_num, col_num)] = col_formatter
    else:
        # single scalar to format all cells with.
        # PERF: wrap once up front instead of re-wrapping on every
        # (row, col) pair as the previous code did inside the loop.
        formatter = _maybe_wrap_formatter(formatter)
        for i, j in product(row_locs, col_locs):
            self._display_funcs[(i, j)] = formatter
    return self
def render(self, **kwargs):
    """
    Render the built up styles to HTML.

    Parameters
    ----------
    `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to
        provide additional variables for a custom template.

        .. versionadded:: 0.20

    Returns
    -------
    rendered : str
        The rendered HTML

    Notes
    -----
    ``Styler`` objects have defined the ``_repr_html_`` method
    which automatically calls ``self.render()`` when it's the
    last item in a Notebook cell. When calling ``Styler.render()``
    directly, wrap the result in ``IPython.display.HTML`` to view
    the rendered HTML in the notebook.

    Pandas uses the following keys in render. Arguments passed
    in ``**kwargs`` take precedence, so think carefully if you want
    to override them:

    * head
    * cellstyle
    * body
    * uuid
    * precision
    * table_styles
    * caption
    * table_attributes
    """
    # Run the queued style functions, then translate state to the
    # template context.
    self._compute()
    # TODO: namespace all the pandas keys
    d = self._translate()
    # filter out empty styles, every cell will have a class
    # but the list of props may just be [['', '']].
    # so we have the nested anys below
    trimmed = [x for x in d['cellstyle']
               if any(any(y) for y in x['props'])]
    d['cellstyle'] = trimmed
    d.update(kwargs)
    return self.template.render(**d)
def _update_ctx(self, attrs):
    """
    Update the state of the Styler.

    Collects a mapping of {index_label: ['<property>: <value>']}.

    attrs : Series or DataFrame
        should contain strings of '<property>: <value>;<prop2>: <val2>'
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
    """
    for row_label, v in attrs.iterrows():
        for col_label, col in v.iteritems():
            # Translate labels to positional (row, col) keys for ctx.
            i = self.index.get_indexer([row_label])[0]
            j = self.columns.get_indexer([col_label])[0]
            # Split "a: b;c: d" into individual "prop: value" entries.
            for pair in col.rstrip(";").split(";"):
                self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
    # Build a sibling Styler over the same data and display settings,
    # then share or clone the accumulated style state per *deepcopy*.
    clone = Styler(self.data, precision=self.precision,
                   caption=self.caption, uuid=self.uuid,
                   table_styles=self.table_styles)
    if deepcopy:
        clone.ctx = copy.deepcopy(self.ctx)
        clone._todo = copy.deepcopy(self._todo)
    else:
        clone.ctx = self.ctx
        clone._todo = self._todo
    return clone
def __copy__(self):
    """
    Shallow copy: the new Styler shares ``ctx`` and ``_todo`` with
    this one (the previous docstring incorrectly claimed deep copy).
    """
    return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
    # Full copy: style state is deep-copied so later mutations of the
    # copy never leak back into this instance.
    return self._copy(deepcopy=True)
def clear(self):
    """
    Discard every applied and pending style; the data is untouched.

    Returns None.
    """
    self.ctx.clear()
    self._todo = []
def _compute(self):
    """
    Execute the style functions built up in `self._todo`.

    Relies on the convention that all style functions go through
    ``.apply`` or ``.applymap``, which append the styles to apply as
    tuples of (application method, *args, **kwargs).
    """
    r = self
    for func, args, kwargs in self._todo:
        # func(self) resolves the bound _apply/_applymap method.
        r = func(self)(*args, **kwargs)
    return r
def _apply(self, func, axis=0, subset=None, **kwargs):
    """
    Run ``func`` over the (optionally sliced) data and fold the
    resulting CSS strings into ``self.ctx``.
    """
    subset = slice(None) if subset is None else subset
    subset = _non_reducing_slice(subset)
    data = self.data.loc[subset]
    if axis is not None:
        # Row- or column-wise application; expand to a DataFrame.
        result = data.apply(func, axis=axis,
                            result_type='expand', **kwargs)
        result.columns = data.columns
    else:
        # Table-wise application: func must hand back a full DataFrame.
        result = func(data, **kwargs)
        if not isinstance(result, pd.DataFrame):
            raise TypeError(
                "Function {func!r} must return a DataFrame when "
                "passed to `Styler.apply` with axis=None"
                .format(func=func))
        if not (result.index.equals(data.index) and
                result.columns.equals(data.columns)):
            msg = ('Result of {func!r} must have identical index and '
                   'columns as the input'.format(func=func))
            raise ValueError(msg)
    result_shape = result.shape
    expected_shape = self.data.loc[subset].shape
    if result_shape != expected_shape:
        msg = ("Function {func!r} returned the wrong shape.\n"
               "Result has shape: {res}\n"
               "Expected shape: {expect}".format(func=func,
                                                 res=result.shape,
                                                 expect=expected_shape))
        raise ValueError(msg)
    self._update_ctx(result)
    return self
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a function column-wise, row-wise, or table-wise,
    updating the HTML representation with the result.

    Parameters
    ----------
    func : function
        Takes a Series or DataFrame (depending on ``axis``) and
        returns an object of the same shape; with ``axis=None`` it
        must return a DataFrame with identical index and columns.
    axis : int, str or None
        Apply per column (``0``/``'index'``), per row
        (``1``/``'columns'``), or over the whole frame (``None``).
    subset : IndexSlice
        A valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice.
    kwargs : dict
        Passed along to ``func``.

    Returns
    -------
    self : Styler

    Notes
    -----
    ``func(x).shape == x.shape`` must hold for the row, column, or
    table ``x`` it receives. Unlike ``DataFrame.apply``, ``axis=None``
    hands the whole DataFrame to ``func`` at once.

    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
                    for v in x]
    ...
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    # Queue the work; _compute() resolves the bound method lazily so
    # copies of this Styler re-run against their own state.
    self._todo.append((lambda instance: instance._apply,
                       (func, axis, subset), kwargs))
    return self
def _applymap(self, func, subset=None, **kwargs):
    # applymap itself takes no kwargs, so bake them into func up front.
    bound = partial(func, **kwargs)
    region = pd.IndexSlice[:] if subset is None else subset
    styles = self.data.loc[_non_reducing_slice(region)].applymap(bound)
    self._update_ctx(styles)
    return self
def applymap(self, func, subset=None, **kwargs):
    """
    Apply a function elementwise, updating the HTML
    representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a scalar and return a scalar.
    subset : IndexSlice
        A valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice.
    kwargs : dict
        Passed along to ``func``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    # Queue the work for _compute(); the bound method is looked up
    # lazily so copied Stylers re-run against their own state.
    self._todo.append((lambda instance: instance._applymap,
                       (func, subset), kwargs))
    return self
def where(self, cond, value, other=None, subset=None, **kwargs):
    """
    Apply a style elementwise, selecting between two CSS strings
    based on the boolean returned by ``cond``.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    cond : callable
        Takes a scalar and returns a boolean.
    value : str
        Applied when ``cond`` returns true.
    other : str
        Applied when ``cond`` returns false (defaults to no style).
    subset : IndexSlice
        A valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice.
    kwargs : dict
        Passed along to ``cond``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap
    """
    fallback = '' if other is None else other
    return self.applymap(
        lambda val: value if cond(val) else fallback,
        subset=subset, **kwargs)
def set_precision(self, precision):
    """
    Set the float precision used when rendering values.

    Parameters
    ----------
    precision : int

    Returns
    -------
    self : Styler
    """
    self.precision = precision
    return self
def set_table_attributes(self, attributes):
    """
    Set the table attributes.

    These are the items rendered inside the opening ``<table>`` tag,
    in addition to the automatic (by default) id.

    Parameters
    ----------
    attributes : string

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_attributes('class="pure-table"')
    # ... <table class="pure-table"> ...
    """
    self.table_attributes = attributes
    return self
def export(self):
    """
    Export the list of pending style functions applied to this Styler;
    feed the result to ``Styler.use`` on another Styler.

    Returns
    -------
    styles : list

    See Also
    --------
    Styler.use
    """
    return self._todo
def use(self, styles):
    """
    Append style functions (typically obtained from ``Styler.export``
    on another Styler) to this Styler's pending styles.

    Parameters
    ----------
    styles : list
        list of style functions

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.export
    """
    self._todo.extend(styles)
    return self
def set_uuid(self, uuid):
    """
    Set the uuid used to namespace this Styler's CSS ids.

    Parameters
    ----------
    uuid : str

    Returns
    -------
    self : Styler
    """
    self.uuid = uuid
    return self
def set_caption(self, caption):
    """
    Set the caption attached to the rendered table.

    Parameters
    ----------
    caption : str

    Returns
    -------
    self : Styler
    """
    self.caption = caption
    return self
def set_table_styles(self, table_styles):
    """
    Set the table styles on a Styler; these are emitted in a
    ``<style>`` tag before the generated HTML table.

    Parameters
    ----------
    table_styles : list
        Each individual table_style should be a dictionary with
        ``selector`` and ``props`` keys. ``selector`` should be a CSS
        selector that the style will be applied to (automatically
        prefixed by the table's UUID) and ``props`` should be a list
        of tuples with ``(attribute, value)``.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_styles(
    ...     [{'selector': 'tr:hover',
    ...       'props': [('background-color', 'yellow')]}]
    ... )
    """
    self.table_styles = table_styles
    return self
def hide_index(self):
    """
    Hide any indices from rendering.

    Returns
    -------
    self : Styler
    """
    self.hidden_index = True
    return self
def hide_columns(self, subset):
    """
    Hide columns from rendering.

    Parameters
    ----------
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that identifies which columns
        are hidden.

    Returns
    -------
    self : Styler
    """
    indexer = _non_reducing_slice(subset)
    selected = self.data.loc[indexer]
    # Store hidden columns as positional indices for _translate.
    self.hidden_columns = self.columns.get_indexer_for(selected.columns)
    return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
    """
    Shade the background ``null_color`` for missing values.

    Parameters
    ----------
    null_color : str

    Returns
    -------
    self : Styler
    """
    self.applymap(self._highlight_null, null_color=null_color)
    return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                        subset=None, text_color_threshold=0.408):
    """
    Color the background in a gradient according to the data in each
    column (optionally row). Requires matplotlib.

    Parameters
    ----------
    cmap : str or colormap
        matplotlib colormap
    low, high : float
        compress the range by these values; the data range is extended
        by ``low * (x.max() - x.min())`` and ``high * (x.max() -
        x.min())`` before normalizing.
    axis : int or str
        1 or 'columns' for columnwise, 0 or 'index' for rowwise
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    text_color_threshold : float or int
        luminance threshold (from 0 to 1) used to pick a readable text
        color: 0 = all text dark, 1 = all text light.

    Returns
    -------
    self : Styler

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.
    """
    # Only numeric columns participate in the gradient.
    sliced = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._background_gradient, cmap=cmap, subset=sliced,
               axis=axis, low=low, high=high,
               text_color_threshold=text_color_threshold)
    return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.

    Returns a list (Series input) or DataFrame (frame input) of CSS
    strings combining a colormap background with a black/white text
    color chosen for legibility.

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)

    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            color : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            # BUG FIX: the gamma expansion is ((x + 0.055) / 1.055) ** 2.4
            # per the W3C formula; the previous code computed
            # (x + 0.055) / (1.055 ** 2.4) due to operator precedence.
            r, g, b = (
                x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )

        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
# MASKED: set_properties function (lines 987-1012)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
    """
    Draw bar chart in dataframe cells.

    Returns a list (Series input) or DataFrame (frame input) of CSS
    linear-gradient strings that render an in-cell bar.
    """
    # Get input value range.
    smin = s.min() if vmin is None else vmin
    if isinstance(smin, ABCSeries):
        smin = smin.min()
    smax = s.max() if vmax is None else vmax
    if isinstance(smax, ABCSeries):
        smax = smax.max()
    if align == 'mid':
        smin = min(0, smin)
        smax = max(0, smax)
    elif align == 'zero':
        # For "zero" mode, we want the range to be symmetrical around zero.
        smax = max(abs(smin), abs(smax))
        smin = -smax
    # Transform to percent-range of linear-gradient
    # (+ 1e-12 guards against division by zero when all values coincide)
    normed = width * (s.values - smin) / (smax - smin + 1e-12)
    zero = -width * smin / (smax - smin + 1e-12)

    def css_bar(start, end, color):
        """
        Generate CSS code to draw a bar from start to end.
        """
        css = 'width: 10em; height: 80%;'
        if end > start:
            css += 'background: linear-gradient(90deg,'
            if start > 0:
                css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                    s=start, c=color
                )
            css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                e=min(end, width), c=color,
            )
        return css

    def css(x):
        # NaN cells get no bar at all.
        if pd.isna(x):
            return ''

        # avoid deprecated indexing `colors[x > zero]`
        color = colors[1] if x > zero else colors[0]

        if align == 'left':
            return css_bar(0, x, color)
        else:
            return css_bar(min(x, zero), max(x, zero), color)

    if s.ndim == 1:
        return [css(x) for x in normed]
    else:
        return pd.DataFrame(
            [[css(x) for x in row] for row in normed],
            index=s.index, columns=s.columns
        )
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar chart in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : int, str or None, default 0
        Apply to each column (`axis=0` or `'index'`), each row
        (`axis=1` or `'columns'`), or the entire DataFrame at once
        with `axis=None`.
    color : str or 2-tuple/list
        A single color for all values, or
        ``[color_negative, color_positive]``
        (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 or 100. The largest value will cover `width`
        percent of the cell's width.
    align : {'left', 'zero',' mid'}, default 'left'
        How to align the bars with the cells:
        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or
          if values are all negative (positive) the zero is aligned
          at the right (left) of the cell.
    vmin : float, optional
        Lower clip limit of the bar drawing range; data minimum when
        None.
    vmax : float, optional
        Upper clip limit of the bar drawing range; data maximum when
        None.

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero',' mid'}")

    # Normalize `color` to a [negative, positive] pair.
    if is_list_like(color):
        if len(color) == 1:
            color = [color[0], color[0]]
        elif len(color) > 2:
            raise ValueError("`color` must be string or a list-like"
                             " of length 2: [`color_neg`, `color_pos`]"
                             " (eg: color=['#d65f5f', '#5fba7d'])")
    else:
        color = [color, color]

    sliced = _maybe_numeric_slice(self.data, subset)
    sliced = _non_reducing_slice(sliced)
    self.apply(self._bar, subset=sliced, axis=axis,
               align=align, colors=color, width=width,
               vmin=vmin, vmax=vmax)

    return self
def highlight_max(self, subset=None, color='yellow', axis=0):
    """
    Highlight the maximum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """
    Highlight the minimum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
                       max_=True):
    """Shared worker for highlight_max / highlight_min: restrict to
    numeric columns and queue ``_highlight_extrema``."""
    numeric = _maybe_numeric_slice(self.data, subset)
    self.apply(self._highlight_extrema, color=color, axis=axis,
               subset=_non_reducing_slice(numeric), max_=max_)
    return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .tee
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
    """
    Factory function creating a subclass of ``Styler`` whose Jinja
    environment searches ``searchpath`` first, falling back to the
    default pandas template loader.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates
    name : str
        Name of your custom template to use for rendering

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    loader = ChoiceLoader([FileSystemLoader(searchpath), cls.loader])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
def pipe(self, func, *args, **kwargs):
    """
    Apply ``func(self, *args, **kwargs)``, and return the result.

    ``func`` may also be a ``(callable, keyword)`` tuple, where
    ``keyword`` is a string naming the argument of ``callable`` that
    expects the Styler. This lets user-defined style transformations
    be interleaved with the built-in Styler interface:

    >>> def format_conversion(styler):
    ...     return (styler.set_properties(**{'text-align': 'right'})
    ...                   .format({'conversion': '{:.1%}'}))
    >>> (df.style
    ...    .highlight_min(subset=['conversion'], color='yellow')
    ...    .pipe(format_conversion)
    ...    .set_caption("Results with minimum conversion highlighted."))

    Parameters
    ----------
    func : function
        Function to apply to the Styler, or a ``(callable, keyword)``
        tuple as described above.
    *args, **kwargs :
        Arguments passed to `func`.

    Returns
    -------
    object :
        The value returned by ``func``.

    See Also
    --------
    DataFrame.pipe : Analogous method for DataFrame.
    Styler.apply : Apply a function row-wise, column-wise, or
        table-wise to modify the dataframe's styling.
    """
    return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, inital_position): span
    """
    # Sentinel marks positions that MultiIndex.format sparsified away
    # (i.e. repeats of the label above them).
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)

    if hidden_elements is None:
        hidden_elements = []

    lengths = {}
    if index.nlevels == 1:
        # Flat index: every visible element spans exactly one cell.
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths

    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                # Sparsification disabled: each cell stands alone.
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # Start of a new visible span.
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # Sparsified repeat of a visible label: grow its span.
                lengths[(i, last_label)] += 1

    # Drop spans that ended up fully hidden.
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}

    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """Coerce a template string into a formatting callable; pass
    callables through unchanged; reject anything else."""
    if is_string_like(formatter):
        return lambda x: formatter.format(x)
    if callable(formatter):
        return formatter
    msg = ("Expected a template string or callable, got {formatter} "
           "instead".format(formatter=formatter))
    raise TypeError(msg)
|
def set_properties(self, subset=None, **kwargs):
    """
    Convenience method for setting one or more non-data dependent
    properties on each cell.

    Parameters
    ----------
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    kwargs : dict
        property: value pairs to be set for each cell

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    # Build one constant CSS string and apply it to every cell.
    css = ';'.join('{p}: {v}'.format(p=prop, v=val)
                   for prop, val in kwargs.items())

    def constant_style(_):
        return css

    return self.applymap(constant_style, subset=subset)
| 987
| 1,012
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    """Yield ``(pyplot, colors)`` when matplotlib is available,
    otherwise raise ImportError naming the requesting ``func``."""
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, colors
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
             caption=None, table_attributes=None, cell_ids=True):
    # ctx maps positional (row, col) -> list of 'prop: value' CSS strings
    self.ctx = defaultdict(list)
    # _todo queues deferred style functions, executed by _compute()
    self._todo = []

    if not isinstance(data, (pd.Series, pd.DataFrame)):
        raise TypeError("``data`` must be a Series or DataFrame")
    if data.ndim == 1:
        # Normalize a Series to a one-column frame for rendering.
        data = data.to_frame()
    if not data.index.is_unique or not data.columns.is_unique:
        raise ValueError("style is not supported for non-unique indices.")

    self.data = data
    self.index = data.index
    self.columns = data.columns

    self.uuid = uuid
    self.table_styles = table_styles
    self.caption = caption
    if precision is None:
        # Fall back to the global display option.
        precision = get_option('display.precision')
    self.precision = precision
    self.table_attributes = table_attributes
    self.hidden_index = False
    self.hidden_columns = []
    self.cell_ids = cell_ids

    # display_funcs maps (row, col) -> formatting function
    def default_display_func(x):
        # Floats use the "general" format at the configured precision;
        # everything else is passed through unchanged.
        if is_float(x):
            return '{:>.{precision}g}'.format(x, precision=self.precision)
        else:
            return x

    self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
    axes='index, columns', klass='Styler',
    axes_single_arg="{0 or 'index', 1 or 'columns'}",
    optional_by="""
        by : str or list of str
            Name or list of names which refer to the axis items.""",
    versionadded_to_excel='\n    .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
             float_format=None, columns=None, header=True, index=True,
             index_label=None, startrow=0, startcol=0, engine=None,
             merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
             freeze_panes=None):
    # Write the styled frame to Excel; docstring supplied by @Appender.
    # NOTE(review): `encoding` and `verbose` are accepted for signature
    # parity but are not forwarded to ExcelFormatter — confirm intended.
    from pandas.io.formats.excel import ExcelFormatter
    formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                               header=header,
                               float_format=float_format, index=index,
                               index_label=index_label,
                               merge_cells=merge_cells,
                               inf_rep=inf_rep)
    formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                    startcol=startcol, freeze_panes=freeze_panes,
                    engine=engine)
def _translate(self):
    """
    Convert the DataFrame in `self.data` and the attrs from `_build_styles`
    into a dictionary of {head, body, uuid, cellstyle}.

    The result is the template context consumed by ``render()``.
    """
    table_styles = self.table_styles or []
    caption = self.caption
    ctx = self.ctx
    precision = self.precision
    hidden_index = self.hidden_index
    hidden_columns = self.hidden_columns
    uuid = self.uuid or str(uuid1()).replace("-", "_")
    # CSS class names attached to the generated cells.
    ROW_HEADING_CLASS = "row_heading"
    COL_HEADING_CLASS = "col_heading"
    INDEX_NAME_CLASS = "index_name"

    DATA_CLASS = "data"
    BLANK_CLASS = "blank"
    BLANK_VALUE = ""

    def format_attr(pair):
        # {'key': 'colspan', 'value': 2} -> 'colspan=2'
        return "{key}={value}".format(**pair)

    # for sparsifying a MultiIndex
    idx_lengths = _get_level_lengths(self.index)
    col_lengths = _get_level_lengths(self.columns, hidden_columns)

    cell_context = dict()

    n_rlvls = self.data.index.nlevels
    n_clvls = self.data.columns.nlevels
    rlabels = self.data.index.tolist()
    clabels = self.data.columns.tolist()

    # Normalize flat indexes so each label is a one-element list,
    # matching the per-level layout of a MultiIndex.
    if n_rlvls == 1:
        rlabels = [[x] for x in rlabels]
    if n_clvls == 1:
        clabels = [[x] for x in clabels]
    clabels = list(zip(*clabels))

    cellstyle = []
    head = []

    for r in range(n_clvls):
        # Blank for Index columns...
        row_es = [{"type": "th",
                   "value": BLANK_VALUE,
                   "display_value": BLANK_VALUE,
                   "is_visible": not hidden_index,
                   "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)

        # ... except maybe the last for columns.names
        name = self.data.columns.names[r]
        cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
              "level{lvl}".format(lvl=r)]
        name = BLANK_VALUE if name is None else name
        row_es.append({"type": "th",
                       "value": name,
                       "display_value": name,
                       "class": " ".join(cs),
                       "is_visible": not hidden_index})

        if clabels:
            for c, value in enumerate(clabels[r]):
                cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get(
                    "col_headings", {}).get(r, {}).get(c, []))
                es = {
                    "type": "th",
                    "value": value,
                    "display_value": value,
                    "class": " ".join(cs),
                    "is_visible": _is_visible(c, r, col_lengths),
                }
                colspan = col_lengths.get((r, c), 0)
                if colspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "colspan", "value": colspan})
                    ]
                row_es.append(es)
            head.append(row_es)

    # Extra header row for named indexes, unless the index is hidden.
    if (self.data.index.names and
            com._any_not_none(*self.data.index.names) and
            not hidden_index):
        index_header_row = []

        for c, name in enumerate(self.data.index.names):
            cs = [INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=c)]
            name = '' if name is None else name
            index_header_row.append({"type": "th", "value": name,
                                     "class": " ".join(cs)})

        index_header_row.extend(
            [{"type": "th",
              "value": BLANK_VALUE,
              "class": " ".join([BLANK_CLASS])
              }] * (len(clabels[0]) - len(hidden_columns)))

        head.append(index_header_row)

    body = []
    for r, idx in enumerate(self.data.index):
        row_es = []
        # Row header (index) cells first...
        for c, value in enumerate(rlabels[r]):
            rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                   "row{row}".format(row=r)]
            es = {
                "type": "th",
                "is_visible": (_is_visible(r, c, idx_lengths) and
                               not hidden_index),
                "value": value,
                "display_value": value,
                "id": "_".join(rid[1:]),
                "class": " ".join(rid)
            }
            rowspan = idx_lengths.get((c, r), 0)
            if rowspan > 1:
                es["attributes"] = [
                    format_attr({"key": "rowspan", "value": rowspan})
                ]
            row_es.append(es)

        # ...then the data cells with their per-cell styles.
        for c, col in enumerate(self.data.columns):
            cs = [DATA_CLASS, "row{row}".format(row=r),
                  "col{col}".format(col=c)]
            cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
            formatter = self._display_funcs[(r, c)]
            value = self.data.iloc[r, c]
            row_dict = {"type": "td",
                        "value": value,
                        "class": " ".join(cs),
                        "display_value": formatter(value),
                        "is_visible": (c not in hidden_columns)}
            # only add an id if the cell has a style
            if (self.cell_ids or
                    not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                row_dict["id"] = "_".join(cs[1:])
            row_es.append(row_dict)
            props = []
            for x in ctx[r, c]:
                # have to handle empty styles like ['']
                if x.count(":"):
                    props.append(x.split(":"))
                else:
                    props.append(['', ''])
            cellstyle.append({'props': props,
                              'selector': "row{row}_col{col}"
                              .format(row=r, col=c)})
        body.append(row_es)

    table_attr = self.table_attributes
    use_mathjax = get_option("display.html.use_mathjax")
    if not use_mathjax:
        # Tag the table so MathJax leaves its contents alone.
        table_attr = table_attr or ''
        if 'class="' in table_attr:
            table_attr = table_attr.replace('class="',
                                            'class="tex2jax_ignore ')
        else:
            table_attr += ' class="tex2jax_ignore"'

    return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                precision=precision, table_styles=table_styles,
                caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
    """
    Format the text display value of cells.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    formatter : str, callable, or dict
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that restricts which elements
        ``formatter`` is applied to.

    Returns
    -------
    self : Styler

    Notes
    -----
    ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
    ``a`` is one of

    - str: this will be wrapped in: ``a.format(x)``
    - callable: called with the value of an individual cell

    The default display value for numeric values is the "general" (``g``)
    format with ``pd.options.display.precision`` precision.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    >>> df.style.format("{:.2%}")
    >>> df['c'] = ['a', 'b', 'c', 'd']
    >>> df.style.format({'c': str.upper})
    """
    if subset is None:
        row_locs = range(len(self.data))
        col_locs = range(len(self.data.columns))
    else:
        subset = _non_reducing_slice(subset)
        if len(subset) == 1:
            # Row-only slice: extend to all columns.
            subset = subset, self.data.columns

        sub_df = self.data.loc[subset]
        row_locs = self.data.index.get_indexer_for(sub_df.index)
        col_locs = self.data.columns.get_indexer_for(sub_df.columns)

    if is_dict_like(formatter):
        for col, col_formatter in formatter.items():
            # formatter must be callable, so '{}' are converted to lambdas
            col_formatter = _maybe_wrap_formatter(col_formatter)
            col_num = self.data.columns.get_indexer_for([col])[0]

            for row_num in row_locs:
                self._display_funcs[(row_num, col_num)] = col_formatter
    else:
        # single scalar to format all cells with; wrap it once up front
        # instead of re-wrapping on every iteration of the product loop
        formatter = _maybe_wrap_formatter(formatter)
        for i, j in product(row_locs, col_locs):
            self._display_funcs[(i, j)] = formatter

    return self
def render(self, **kwargs):
    """
    Render the built up styles to HTML.

    Parameters
    ----------
    `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render`` and take precedence over the keys
        pandas supplies (head, cellstyle, body, uuid, precision,
        table_styles, caption, table_attributes). This is useful when
        you need to provide additional variables for a custom template.

    Returns
    -------
    rendered : str
        The rendered HTML

    Notes
    -----
    ``Styler`` objects define ``_repr_html_``, so ``self.render()`` is
    called automatically when a Styler is the last item in a Notebook
    cell. When calling ``Styler.render()`` directly, wrap the result
    in ``IPython.display.HTML`` to view the rendered HTML in the
    notebook.
    """
    self._compute()
    # TODO: namespace all the pandas keys
    context = self._translate()
    # Every cell carries a class, but styles like [['', '']] contain no
    # real declarations; drop those entries before rendering.
    context['cellstyle'] = [
        entry for entry in context['cellstyle']
        if any(any(prop) for prop in entry['props'])
    ]
    context.update(kwargs)
    return self.template.render(**context)
def _update_ctx(self, attrs):
    """
    Update the state of the Styler.

    Collects a mapping of {index_label: ['<property>: <value>']}.

    attrs : Series or DataFrame
        should contain strings of '<property>: <value>;<prop2>: <val2>'
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
    """
    for row_label, v in attrs.iterrows():
        for col_label, col in v.iteritems():
            # Map the labels back to positional keys for self.ctx.
            i = self.index.get_indexer([row_label])[0]
            j = self.columns.get_indexer([col_label])[0]
            # Split 'a: b;c: d;' into individual 'prop: value' pairs.
            for pair in col.rstrip(";").split(";"):
                self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
    """Return a new Styler over the same data; ``ctx``/``_todo`` are
    deep-copied when ``deepcopy`` is True, otherwise shared."""
    styler = Styler(self.data, precision=self.precision,
                    caption=self.caption, uuid=self.uuid,
                    table_styles=self.table_styles)
    dup = copy.deepcopy if deepcopy else (lambda obj: obj)
    styler.ctx = dup(self.ctx)
    styler._todo = dup(self._todo)
    return styler
def __copy__(self):
    """
    Shallow copy: the new Styler shares ``ctx`` and ``_todo`` with
    this one (``_copy(deepcopy=False)``).
    """
    return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
return self._copy(deepcopy=True)
def clear(self):
    """
    Reset the styler, removing any previously applied styles.

    Returns None.
    """
    self.ctx.clear()
    # Rebind (rather than clear in place) so lists previously handed
    # out via export() are left untouched.
    self._todo = []
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .applymap. The append styles to apply as tuples of
(application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
    # Worker for ``apply``: run ``func`` over the (optionally sliced)
    # data and merge the returned frame of CSS strings into self.ctx.
    subset = slice(None) if subset is None else subset
    subset = _non_reducing_slice(subset)
    data = self.data.loc[subset]
    if axis is not None:
        # Row-/column-wise: delegate to DataFrame.apply.
        result = data.apply(func, axis=axis,
                            result_type='expand', **kwargs)
        result.columns = data.columns
    else:
        # axis=None: func sees the whole (sliced) DataFrame and must
        # return a DataFrame with identical labels.
        result = func(data, **kwargs)
        if not isinstance(result, pd.DataFrame):
            raise TypeError(
                "Function {func!r} must return a DataFrame when "
                "passed to `Styler.apply` with axis=None"
                .format(func=func))
        if not (result.index.equals(data.index) and
                result.columns.equals(data.columns)):
            msg = ('Result of {func!r} must have identical index and '
                   'columns as the input'.format(func=func))
            raise ValueError(msg)

    result_shape = result.shape
    expected_shape = self.data.loc[subset].shape
    if result_shape != expected_shape:
        msg = ("Function {func!r} returned the wrong shape.\n"
               "Result has shape: {res}\n"
               "Expected shape: {expect}".format(func=func,
                                                 res=result.shape,
                                                 expect=expected_shape))
        raise ValueError(msg)
    self._update_ctx(result)
    return self
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a function column-wise, row-wise, or table-wise, updating
    the HTML representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a Series or DataFrame (depending on
        ``axis``), and return an object with the same shape. Must
        return a DataFrame with identical index and column labels
        when ``axis=None``.
    axis : int, str or None
        apply to each column (``axis=0`` or ``'index'``), to each row
        (``axis=1`` or ``'columns'``), or to the entire DataFrame at
        once with ``axis=None``
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    Notes
    -----
    The output shape of ``func`` should match the input, i.e.
    ``func(x).shape == x.shape`` should be true.

    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
    ...             for v in x]
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    # Execution is deferred until render(): record how to look up the
    # worker method plus the arguments it will receive.
    def resolve(instance):
        return getattr(instance, '_apply')

    self._todo.append((resolve, (func, axis, subset), kwargs))
    return self
def _applymap(self, func, subset=None, **kwargs):
    """Eagerly apply elementwise ``func`` over ``subset`` and record the
    resulting CSS strings in ``self.ctx`` (called from ``_compute``)."""
    func = partial(func, **kwargs)  # applymap doesn't take kwargs?
    if subset is None:
        # default to the full table
        subset = pd.IndexSlice[:]
    subset = _non_reducing_slice(subset)
    result = self.data.loc[subset].applymap(func)
    self._update_ctx(result)
    return self
def applymap(self, func, subset=None, **kwargs):
    """
    Apply ``func`` elementwise, updating the HTML representation with
    the result.

    Parameters
    ----------
    func : function
        ``func`` should take a scalar and return a scalar
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    # Deferred like ``apply``: executed later by ``_compute``.
    todo_item = (lambda instance: getattr(instance, '_applymap'),
                 (func, subset), kwargs)
    self._todo.append(todo_item)
    return self
def where(self, cond, value, other=None, subset=None, **kwargs):
    """
    Apply a style elementwise, selecting ``value`` or ``other``
    according to the boolean returned by ``cond``.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    cond : callable
        ``cond`` should take a scalar and return a boolean
    value : str
        applied when ``cond`` returns true
    other : str
        applied when ``cond`` returns false
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``cond``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap
    """
    # An unspecified ``other`` means "no style" for falsy cells.
    fallback = '' if other is None else other
    return self.applymap(
        lambda val: value if cond(val) else fallback,
        subset=subset, **kwargs)
def set_precision(self, precision):
    """
    Set the precision used to render floats.

    Parameters
    ----------
    precision : int
        number of digits used by the default ``g`` float format

    Returns
    -------
    self : Styler
    """
    self.precision = precision
    return self
def set_table_attributes(self, attributes):
    """
    Set the table attributes.

    These are the items that show up in the opening ``<table>`` tag
    in addition to the automatic (by default) id.

    Parameters
    ----------
    attributes : string
        inserted verbatim into the ``<table>`` tag

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_attributes('class="pure-table"')
    # ... <table class="pure-table"> ...
    """
    self.table_attributes = attributes
    return self
def export(self):
    """
    Export the styles applied to the current Styler.

    Can be applied to a second style with ``Styler.use``.

    Returns
    -------
    styles : list
        the pending (not yet computed) style functions

    See Also
    --------
    Styler.use
    """
    return self._todo
def use(self, styles):
    """
    Set the styles on the current Styler, possibly using styles
    from ``Styler.export``.

    Parameters
    ----------
    styles : list
        list of style functions

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.export
    """
    self._todo.extend(styles)
    return self
def set_uuid(self, uuid):
    """
    Set the uuid for a Styler.

    Parameters
    ----------
    uuid : str
        used to namespace the generated CSS ids

    Returns
    -------
    self : Styler
    """
    self.uuid = uuid
    return self
def set_caption(self, caption):
    """
    Set the caption on a Styler.

    Parameters
    ----------
    caption : str

    Returns
    -------
    self : Styler
    """
    self.caption = caption
    return self
def set_table_styles(self, table_styles):
    """
    Set the table styles on a Styler.

    These are placed in a ``<style>`` tag before the generated HTML table.

    Parameters
    ----------
    table_styles : list
        Each individual table_style should be a dictionary with
        ``selector`` and ``props`` keys. ``selector`` should be a CSS
        selector that the style will be applied to (automatically
        prefixed by the table's UUID) and ``props`` should be a list of
        tuples with ``(attribute, value)``.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_styles(
    ...     [{'selector': 'tr:hover',
    ...       'props': [('background-color', 'yellow')]}]
    ... )
    """
    self.table_styles = table_styles
    return self
def hide_index(self):
    """
    Hide any indices from rendering.

    .. versionadded:: 0.23.0

    Returns
    -------
    self : Styler
    """
    self.hidden_index = True
    return self
def hide_columns(self, subset):
    """
    Hide columns from rendering.

    .. versionadded:: 0.23.0

    Parameters
    ----------
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that identifies which columns
        are hidden.

    Returns
    -------
    self : Styler
    """
    subset = _non_reducing_slice(subset)
    hidden_df = self.data.loc[subset]
    # store positional column indices; ``_translate`` skips these
    self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
    return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
    # CSS for one cell: shade NA/NaN values, leave others unstyled.
    return ('background-color: {color}'.format(color=null_color)
            if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
    """
    Shade the background ``null_color`` for missing values.

    Parameters
    ----------
    null_color : str
        any CSS color accepted by ``background-color``

    Returns
    -------
    self : Styler
    """
    self.applymap(self._highlight_null, null_color=null_color)
    return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                        subset=None, text_color_threshold=0.408):
    """
    Color the background in a gradient according to the data in each
    column (optionally row).  Requires matplotlib.

    Parameters
    ----------
    cmap : str or colormap
        matplotlib colormap
    low, high : float
        compress the range by these values.
    axis : int or str
        1 or 'columns' for columnwise, 0 or 'index' for rowwise
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    text_color_threshold : float or int
        luminance threshold for determining text color, from 0 (all
        text dark colored) to 1 (all text light colored).

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.

    Notes
    -----
    Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep
    the text legible by not using the entire range of the color map.
    The range of the data is extended by ``low * (x.max() - x.min())``
    and ``high * (x.max() - x.min())`` before normalizing.
    """
    # Restrict to numeric columns first, then normalize the indexer.
    numeric_subset = _maybe_numeric_slice(self.data, subset)
    gradient_subset = _non_reducing_slice(numeric_subset)
    self.apply(self._background_gradient, cmap=cmap,
               subset=gradient_subset, axis=axis, low=low, high=high,
               text_color_threshold=text_color_threshold)
    return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.

    Maps the values in ``s`` (Series or DataFrame) onto ``cmap`` and
    returns CSS ``background-color``/``color`` declarations per cell,
    choosing light or dark text based on the background's relative
    luminance versus ``text_color_threshold``.

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)
    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            color : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            # BUG FIX: the sRGB linearization is ((x + 0.055) / 1.055)
            # raised to 2.4.  The previous code wrote
            # (x + 0.055) / 1.055 ** 2.4, which -- because ** binds
            # tighter than / -- divided by 1.055**2.4 and skewed the
            # dark/light text decision.
            r, g, b = (
                x / 12.92 if x <= 0.03928
                else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )
        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
def set_properties(self, subset=None, **kwargs):
    """
    Convenience method for setting one or more non-data dependent
    properties on each cell.

    Parameters
    ----------
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    kwargs : dict
        property: value pairs to be set for each cell

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    # Build one static CSS string and attach it to every cell.
    css = ';'.join(
        '{p}: {v}'.format(p=prop, v=val) for prop, val in kwargs.items())
    return self.applymap(lambda _: css, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
    """
    Compute CSS ``linear-gradient`` bar styles for the values in ``s``.
    """
    # Establish the value range the bars are drawn over (explicit
    # vmin/vmax override the data's own extremes).
    low = s.min() if vmin is None else vmin
    if isinstance(low, ABCSeries):
        low = low.min()
    high = s.max() if vmax is None else vmax
    if isinstance(high, ABCSeries):
        high = high.max()
    if align == 'mid':
        low = min(0, low)
        high = max(0, high)
    elif align == 'zero':
        # For "zero" mode, we want the range to be symmetrical
        # around zero.
        high = max(abs(low), abs(high))
        low = -high
    # Transform to percent-range of linear-gradient
    normed = width * (s.values - low) / (high - low + 1e-12)
    zero = -width * low / (high - low + 1e-12)

    def css_bar(start, end, color):
        """
        Generate CSS code to draw a bar from start to end.
        """
        css = 'width: 10em; height: 80%;'
        if end > start:
            css += 'background: linear-gradient(90deg,'
            if start > 0:
                css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                    s=start, c=color
                )
            css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                e=min(end, width), c=color,
            )
        return css

    def css(x):
        if pd.isna(x):
            return ''
        # avoid deprecated indexing `colors[x > zero]`
        color = colors[1] if x > zero else colors[0]
        if align == 'left':
            return css_bar(0, x, color)
        else:
            return css_bar(min(x, zero), max(x, zero), color)

    if s.ndim == 1:
        return [css(x) for x in normed]
    else:
        return pd.DataFrame(
            [[css(x) for x in row] for row in normed],
            index=s.index, columns=s.columns
        )
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar chart in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : int, str or None, default 0
        Apply to each column (`axis=0` or `'index'`), to each row
        (`axis=1` or `'columns'`), or to the entire DataFrame at once
        with `axis=None`.
    color : str or 2-tuple/list
        Single color used for all values, or a 2-element list/tuple
        ``[color_negative, color_positive]``
        (eg: color=['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 or 100. The largest value will cover `width`
        percent of the cell's width.
    align : {'left', 'zero',' mid'}, default 'left'
        How to align the bars with the cells.

        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or
          if values are all negative (positive) the zero is aligned
          at the right (left) of the cell.

        .. versionadded:: 0.20.0

    vmin : float, optional
        Minimum bar value; lower values are clipped to `vmin`.
        When None (default): the minimum value of the data will be used.

        .. versionadded:: 0.24.0

    vmax : float, optional
        Maximum bar value; higher values are clipped to `vmax`.
        When None (default): the maximum value of the data will be used.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero',' mid'}")
    # Normalize ``color`` to a [negative, positive] pair.
    if not is_list_like(color):
        color = [color, color]
    elif len(color) == 1:
        color = [color[0], color[0]]
    elif len(color) > 2:
        raise ValueError("`color` must be string or a list-like"
                         " of length 2: [`color_neg`, `color_pos`]"
                         " (eg: color=['#d65f5f', '#5fba7d'])")
    subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._bar, subset=subset, axis=axis, align=align,
               colors=color, width=width, vmin=vmin, vmax=vmax)
    return self
def highlight_max(self, subset=None, color='yellow', axis=0):
    """
    Highlight the maximum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
        or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """
    Highlight the minimum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
        or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color, axis=axis,
                                   max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
                       max_=True):
    # Shared implementation of highlight_max/highlight_min: restrict to
    # numeric data, then defer ``_highlight_extrema`` via ``apply``.
    subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._highlight_extrema, color=color, axis=axis,
               subset=subset, max_=max_)
    return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
    """
    Highlight the min or max in a Series or DataFrame.
    """
    attr = 'background-color: {0}'.format(color)
    if data.ndim == 1:  # Series from .apply
        if max_:
            extrema = data == data.max()
        else:
            extrema = data == data.min()
        return [attr if v else '' for v in extrema]
    else:  # DataFrame from .apply(axis=None)
        if max_:
            extrema = data == data.max().max()
        else:
            extrema = data == data.min().min()
        return pd.DataFrame(np.where(extrema, attr, ''),
                            index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
    """
    Factory function for creating a subclass of ``Styler``
    with a custom template and Jinja environment.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates
    name : str
        Name of your custom template to use for rendering

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    # Fall back to the default package templates when the custom search
    # path does not provide the requested template.
    loader = ChoiceLoader([
        FileSystemLoader(searchpath),
        cls.loader,
    ])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
def pipe(self, func, *args, **kwargs):
    """
    Apply ``func(self, *args, **kwargs)``, and return the result.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    func : function
        Function to apply to the Styler.  Alternatively, a
        ``(callable, keyword)`` tuple where ``keyword`` is a string
        indicating the keyword of ``callable`` that expects the Styler.
    *args, **kwargs :
        Arguments passed to `func`.

    Returns
    -------
    object :
        The value returned by ``func``.

    See Also
    --------
    DataFrame.pipe : Analogous method for DataFrame.
    Styler.apply : Apply a function row-wise, column-wise, or
        table-wise to modify the dataframe's styling.

    Notes
    -----
    Like :meth:`DataFrame.pipe`, this lets user-defined style
    "transformations" — functions that take a styler (plus other
    parameters) and return a styler — be chained fluently with the
    built-in Styler interface instead of nesting calls:

    .. code-block:: python

        (df.style.set_precision(3)
           .pipe(g, arg1=a)
           .pipe(f, arg2=b, arg3=c))

    Examples
    --------
    >>> def format_conversion(styler):
    ...     return (styler.set_properties(**{'text-align': 'right'})
    ...                   .format({'conversion': '{:.1%}'}))

    >>> df = pd.DataFrame({'trial': list(range(5)),
    ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
    >>> (df.style
    ...    .highlight_min(subset=['conversion'], color='yellow')
    ...    .pipe(format_conversion)
    ...    .set_caption("Results with minimum conversion highlighted."))
    """
    return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, initial_position): span
    """
    # sentinel marks positions that ``Index.format`` sparsified away
    # (i.e. repeats of the label above them)
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)
    if hidden_elements is None:
        hidden_elements = []
    lengths = {}
    if index.nlevels == 1:
        # flat index: every visible element is its own span of 1
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths
    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # start of a new visible span
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # sparsified continuation: extend the current span
                lengths[(i, last_label)] += 1
    # drop fully-hidden spans (length 0)
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}
    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """
    Coerce ``formatter`` to a one-argument callable.

    A template string such as ``"{:.2f}"`` becomes its bound ``format``
    method; a callable is returned unchanged; anything else raises
    ``TypeError``.
    """
    if is_string_like(formatter):
        # bound method behaves like the old ``lambda x: formatter.format(x)``
        return formatter.format
    if callable(formatter):
        return formatter
    msg = ("Expected a template string or callable, got {formatter} "
           "instead".format(formatter=formatter))
    raise TypeError(msg)
|
bar
|
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : int, str or None, default 0
Apply to each column (`axis=0` or `'index'`)
or to each row (`axis=1` or `'columns'`) or
to the entire DataFrame at once with `axis=None`.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 or 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero',' mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    """Yield ``(plt, colors)`` when matplotlib is importable; otherwise
    raise ImportError naming the calling ``func``."""
    if has_mpl:
        yield plt, colors
    else:
        raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
    * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
             caption=None, table_attributes=None, cell_ids=True):
    # ctx maps (row, col) -> list of CSS 'prop: value' strings
    self.ctx = defaultdict(list)
    # pending style functions, executed lazily by ``_compute``
    self._todo = []
    if not isinstance(data, (pd.Series, pd.DataFrame)):
        raise TypeError("``data`` must be a Series or DataFrame")
    if data.ndim == 1:
        data = data.to_frame()
    if not data.index.is_unique or not data.columns.is_unique:
        raise ValueError("style is not supported for non-unique indices.")
    self.data = data
    self.index = data.index
    self.columns = data.columns
    self.uuid = uuid
    self.table_styles = table_styles
    self.caption = caption
    if precision is None:
        precision = get_option('display.precision')
    self.precision = precision
    self.table_attributes = table_attributes
    self.hidden_index = False
    self.hidden_columns = []
    self.cell_ids = cell_ids

    # display_funcs maps (row, col) -> formatting function
    def default_display_func(x):
        # floats use the "general" format at the configured precision
        if is_float(x):
            return '{:>.{precision}g}'.format(x, precision=self.precision)
        else:
            return x
    self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
    """
    Hooks into Jupyter notebook rich display system.
    """
    return self.render()
@Appender(_shared_docs['to_excel'] % dict(
    axes='index, columns', klass='Styler',
    axes_single_arg="{0 or 'index', 1 or 'columns'}",
    optional_by="""
        by : str or list of str
            Name or list of names which refer to the axis items.""",
    versionadded_to_excel='\n    .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
             float_format=None, columns=None, header=True, index=True,
             index_label=None, startrow=0, startcol=0, engine=None,
             merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
             freeze_panes=None):
    # Delegate to ExcelFormatter, which writes the styles with the data.
    # NOTE(review): ``encoding`` and ``verbose`` are accepted for
    # signature parity with DataFrame.to_excel but are not forwarded
    # anywhere below — confirm this is intended.
    from pandas.io.formats.excel import ExcelFormatter
    formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                               header=header,
                               float_format=float_format, index=index,
                               index_label=index_label,
                               merge_cells=merge_cells,
                               inf_rep=inf_rep)
    formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
                    startcol=startcol, freeze_panes=freeze_panes,
                    engine=engine)
def _translate(self):
    """
    Convert the DataFrame in `self.data` and the attrs from `_build_styles`
    into a dictionary of {head, body, uuid, cellstyle} consumed by the
    Jinja template in ``render``.
    """
    table_styles = self.table_styles or []
    caption = self.caption
    ctx = self.ctx
    precision = self.precision
    hidden_index = self.hidden_index
    hidden_columns = self.hidden_columns
    uuid = self.uuid or str(uuid1()).replace("-", "_")
    ROW_HEADING_CLASS = "row_heading"
    COL_HEADING_CLASS = "col_heading"
    INDEX_NAME_CLASS = "index_name"
    DATA_CLASS = "data"
    BLANK_CLASS = "blank"
    BLANK_VALUE = ""

    def format_attr(pair):
        # {"key": k, "value": v} -> 'k=v' for HTML tag attributes
        return "{key}={value}".format(**pair)

    # for sparsifying a MultiIndex
    idx_lengths = _get_level_lengths(self.index)
    col_lengths = _get_level_lengths(self.columns, hidden_columns)
    cell_context = dict()
    n_rlvls = self.data.index.nlevels
    n_clvls = self.data.columns.nlevels
    rlabels = self.data.index.tolist()
    clabels = self.data.columns.tolist()
    if n_rlvls == 1:
        rlabels = [[x] for x in rlabels]
    if n_clvls == 1:
        clabels = [[x] for x in clabels]
    # transpose so clabels[level] lists the labels at that level
    clabels = list(zip(*clabels))
    cellstyle = []
    head = []
    for r in range(n_clvls):
        # Blank for Index columns...
        row_es = [{"type": "th",
                   "value": BLANK_VALUE,
                   "display_value": BLANK_VALUE,
                   "is_visible": not hidden_index,
                   "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
        # ... except maybe the last for columns.names
        name = self.data.columns.names[r]
        cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
              "level{lvl}".format(lvl=r)]
        name = BLANK_VALUE if name is None else name
        row_es.append({"type": "th",
                       "value": name,
                       "display_value": name,
                       "class": " ".join(cs),
                       "is_visible": not hidden_index})
        if clabels:
            for c, value in enumerate(clabels[r]):
                cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get(
                    "col_headings", {}).get(r, {}).get(c, []))
                es = {
                    "type": "th",
                    "value": value,
                    "display_value": value,
                    "class": " ".join(cs),
                    "is_visible": _is_visible(c, r, col_lengths),
                }
                colspan = col_lengths.get((r, c), 0)
                if colspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "colspan", "value": colspan})
                    ]
                row_es.append(es)
            head.append(row_es)
    # extra header row for index names, when any are set
    if (self.data.index.names and
            com._any_not_none(*self.data.index.names) and
            not hidden_index):
        index_header_row = []
        for c, name in enumerate(self.data.index.names):
            cs = [INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=c)]
            name = '' if name is None else name
            index_header_row.append({"type": "th", "value": name,
                                     "class": " ".join(cs)})
        index_header_row.extend(
            [{"type": "th",
              "value": BLANK_VALUE,
              "class": " ".join([BLANK_CLASS])
              }] * (len(clabels[0]) - len(hidden_columns)))
        head.append(index_header_row)
    body = []
    for r, idx in enumerate(self.data.index):
        row_es = []
        # row heading cells (one per index level)
        for c, value in enumerate(rlabels[r]):
            rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                   "row{row}".format(row=r)]
            es = {
                "type": "th",
                "is_visible": (_is_visible(r, c, idx_lengths) and
                               not hidden_index),
                "value": value,
                "display_value": value,
                "id": "_".join(rid[1:]),
                "class": " ".join(rid)
            }
            rowspan = idx_lengths.get((c, r), 0)
            if rowspan > 1:
                es["attributes"] = [
                    format_attr({"key": "rowspan", "value": rowspan})
                ]
            row_es.append(es)
        # data cells
        for c, col in enumerate(self.data.columns):
            cs = [DATA_CLASS, "row{row}".format(row=r),
                  "col{col}".format(col=c)]
            cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
            formatter = self._display_funcs[(r, c)]
            value = self.data.iloc[r, c]
            row_dict = {"type": "td",
                        "value": value,
                        "class": " ".join(cs),
                        "display_value": formatter(value),
                        "is_visible": (c not in hidden_columns)}
            # only add an id if the cell has a style
            if (self.cell_ids or
                    not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                row_dict["id"] = "_".join(cs[1:])
            row_es.append(row_dict)
            props = []
            for x in ctx[r, c]:
                # have to handle empty styles like ['']
                if x.count(":"):
                    props.append(x.split(":"))
                else:
                    props.append(['', ''])
            cellstyle.append({'props': props,
                              'selector': "row{row}_col{col}"
                              .format(row=r, col=c)})
        body.append(row_es)
    table_attr = self.table_attributes
    use_mathjax = get_option("display.html.use_mathjax")
    if not use_mathjax:
        # opt rendered table out of MathJax processing
        table_attr = table_attr or ''
        if 'class="' in table_attr:
            table_attr = table_attr.replace('class="',
                                            'class="tex2jax_ignore ')
        else:
            table_attr += ' class="tex2jax_ignore"'
    return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
                precision=precision, table_styles=table_styles,
                caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
    """
    Format the text display value of cells.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    formatter : str, callable, or dict
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that restricts which elements
        ``formatter`` is applied to.

    Returns
    -------
    self : Styler

    Notes
    -----
    ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
    ``a`` is one of

    - str: this will be wrapped in: ``a.format(x)``
    - callable: called with the value of an individual cell

    The default display value for numeric values is the "general" (``g``)
    format with ``pd.options.display.precision`` precision.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    >>> df.style.format("{:.2%}")
    >>> df['c'] = ['a', 'b', 'c', 'd']
    >>> df.style.format({'c': str.upper})
    """
    if subset is None:
        row_locs = range(len(self.data))
        col_locs = range(len(self.data.columns))
    else:
        subset = _non_reducing_slice(subset)
        if len(subset) == 1:
            # a bare row indexer applies to every column
            subset = subset, self.data.columns
        sub_df = self.data.loc[subset]
        row_locs = self.data.index.get_indexer_for(sub_df.index)
        col_locs = self.data.columns.get_indexer_for(sub_df.columns)
    if is_dict_like(formatter):
        for col, col_formatter in formatter.items():
            # formatter must be callable, so '{}' are converted to lambdas
            col_formatter = _maybe_wrap_formatter(col_formatter)
            col_num = self.data.columns.get_indexer_for([col])[0]
            for row_num in row_locs:
                self._display_funcs[(row_num, col_num)] = col_formatter
    else:
        # single scalar to format all cells with; wrap it ONCE here
        # (previously it was re-wrapped inside the loop, building a new
        # lambda per cell for string formatters)
        formatter = _maybe_wrap_formatter(formatter)
        for i, j in product(row_locs, col_locs):
            self._display_funcs[(i, j)] = formatter
    return self
def render(self, **kwargs):
    """
    Render the built up styles to HTML.

    Parameters
    ----------
    `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to
        provide additional variables for a custom template.

        .. versionadded:: 0.20

    Returns
    -------
    rendered : str
        The rendered HTML

    Notes
    -----
    ``Styler`` objects have defined the ``_repr_html_`` method
    which automatically calls ``self.render()`` when it's the
    last item in a Notebook cell. When calling ``Styler.render()``
    directly, wrap the result in ``IPython.display.HTML`` to view
    the rendered HTML in the notebook.

    Pandas uses the following keys in render. Arguments passed
    in ``**kwargs`` take precedence, so think carefully if you want
    to override them:

    * head
    * cellstyle
    * body
    * uuid
    * precision
    * table_styles
    * caption
    * table_attributes
    """
    # run the deferred style functions before translating
    self._compute()
    # TODO: namespace all the pandas keys
    d = self._translate()
    # filter out empty styles, every cell will have a class
    # but the list of props may just be [['', '']].
    # so we have the nested anys below
    trimmed = [x for x in d['cellstyle']
               if any(any(y) for y in x['props'])]
    d['cellstyle'] = trimmed
    d.update(kwargs)
    return self.template.render(**d)
def _update_ctx(self, attrs):
    """
    Merge computed styles into ``self.ctx``.

    Parameters
    ----------
    attrs : DataFrame
        Cells contain CSS declaration strings like
        ``'<property>: <value>;<prop2>: <val2>'``. Whitespace and a
        trailing ``';'`` are tolerated.
    """
    for row_label, row in attrs.iterrows():
        for col_label, cell in row.iteritems():
            # Map labels back to positional (row, col) keys for ctx.
            r = self.index.get_indexer([row_label])[0]
            c = self.columns.get_indexer([col_label])[0]
            for declaration in cell.rstrip(";").split(";"):
                self.ctx[(r, c)].append(declaration)
def _copy(self, deepcopy=False):
    """
    Return a copy of this Styler.

    Parameters
    ----------
    deepcopy : bool, default False
        When True, ``ctx`` and ``_todo`` are deep-copied so the two
        Stylers evolve independently; otherwise they are shared.
    """
    clone = Styler(self.data, precision=self.precision,
                   caption=self.caption, uuid=self.uuid,
                   table_styles=self.table_styles)
    if deepcopy:
        clone.ctx = copy.deepcopy(self.ctx)
        clone._todo = copy.deepcopy(self._todo)
    else:
        clone.ctx = self.ctx
        clone._todo = self._todo
    return clone
def __copy__(self):
    """
    Shallow copy: the new Styler shares ``ctx`` and ``_todo`` with
    this one (use ``copy.deepcopy`` for an independent copy).
    """
    return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
    """
    Deep copy: ``ctx`` and ``_todo`` are duplicated so the copies
    evolve independently.
    """
    return self._copy(deepcopy=True)
def clear(self):
    """
    Reset the styler, discarding every previously applied style.

    Returns None.
    """
    self._todo = []
    self.ctx.clear()
def _compute(self):
    """
    Execute the queued style functions.

    Every entry in ``self._todo`` was appended by ``apply``/``applymap``
    as ``(method_getter, args, kwargs)``; each getter is resolved
    against this Styler and the resulting bound method invoked in order.
    """
    result = self
    for method_getter, args, kwargs in self._todo:
        result = method_getter(self)(*args, **kwargs)
    return result
def _apply(self, func, axis=0, subset=None, **kwargs):
    """
    Run one queued style function and fold its output into ``self.ctx``.

    Parameters
    ----------
    func : callable
        Style function; receives a Series (``axis`` 0/1) or the whole
        DataFrame (``axis=None``) and returns CSS strings of the same
        shape.
    axis : int, str or None, default 0
        Axis along which ``func`` is applied; ``None`` means table-wise.
    subset : IndexSlice or None
        Restricts application; ``None`` means every cell.
    kwargs : dict
        Forwarded to ``func``.

    Returns
    -------
    self : Styler

    Raises
    ------
    TypeError
        If ``axis=None`` and ``func`` does not return a DataFrame.
    ValueError
        If the result's labels or shape do not match the input's.
    """
    subset = slice(None) if subset is None else subset
    subset = _non_reducing_slice(subset)
    data = self.data.loc[subset]
    if axis is not None:
        result = data.apply(func, axis=axis,
                            result_type='expand', **kwargs)
        # apply() may relabel the result; restore the original column
        # labels so _update_ctx maps styles onto the right cells.
        result.columns = data.columns
    else:
        result = func(data, **kwargs)
        if not isinstance(result, pd.DataFrame):
            raise TypeError(
                "Function {func!r} must return a DataFrame when "
                "passed to `Styler.apply` with axis=None"
                .format(func=func))
        if not (result.index.equals(data.index) and
                result.columns.equals(data.columns)):
            msg = ('Result of {func!r} must have identical index and '
                   'columns as the input'.format(func=func))
            raise ValueError(msg)

    result_shape = result.shape
    expected_shape = self.data.loc[subset].shape
    if result_shape != expected_shape:
        msg = ("Function {func!r} returned the wrong shape.\n"
               "Result has shape: {res}\n"
               "Expected shape: {expect}".format(func=func,
                                                 res=result.shape,
                                                 expect=expected_shape))
        raise ValueError(msg)
    self._update_ctx(result)
    return self
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a style function column-wise, row-wise, or table-wise,
    updating the HTML representation with the result.

    Parameters
    ----------
    func : function
        Receives a Series (``axis=0``/``'index'`` for columns,
        ``axis=1``/``'columns'`` for rows) or the entire DataFrame
        (``axis=None``) and must return an object of the same shape
        containing CSS strings; with ``axis=None`` the result must be a
        DataFrame with identical index and column labels.
    axis : int, str or None
        Axis to apply along, as above.
    subset : IndexSlice
        A valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice.
    kwargs : dict
        Passed along to ``func``.

    Returns
    -------
    self : Styler

    Notes
    -----
    Similar to ``DataFrame.apply``, except ``axis=None`` applies the
    function to the whole table at once. Execution is deferred until
    ``render`` via the ``_todo`` queue.

    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
    ...             for v in x]
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    self._todo.append((lambda instance: instance._apply,
                       (func, axis, subset), kwargs))
    return self
def _applymap(self, func, subset=None, **kwargs):
    """
    Elementwise worker behind ``applymap``: run ``func`` (with ``kwargs``
    pre-bound, since ``DataFrame.applymap`` takes no kwargs) over the
    selected cells and merge the resulting CSS into ``self.ctx``.
    """
    bound = partial(func, **kwargs)
    target = pd.IndexSlice[:] if subset is None else subset
    styles = self.data.loc[_non_reducing_slice(target)].applymap(bound)
    self._update_ctx(styles)
    return self
def applymap(self, func, subset=None, **kwargs):
    """
    Apply a style function elementwise, updating the HTML
    representation with the result.

    Parameters
    ----------
    func : function
        Takes a scalar and returns a CSS string.
    subset : IndexSlice
        A valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice.
    kwargs : dict
        Passed along to ``func``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    # Deferred: queued on _todo and executed by _compute at render time.
    self._todo.append((lambda instance: instance._applymap,
                       (func, subset), kwargs))
    return self
def where(self, cond, value, other=None, subset=None, **kwargs):
    """
    Apply a style elementwise, choosing the CSS according to the
    boolean returned by ``cond``.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    cond : callable
        ``cond`` should take a scalar and return a boolean.
    value : str
        Applied when ``cond`` returns true.
    other : str
        Applied when ``cond`` returns false; defaults to the empty
        string (no style).
    subset : IndexSlice
        A valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice.
    kwargs : dict
        Pass along to ``cond``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap
    """
    if other is None:
        other = ''
    # Bind kwargs to ``cond`` here, as the docstring promises.
    # Previously they were forwarded to applymap, whose partial() bound
    # them to the wrapping single-argument lambda instead, raising
    # TypeError whenever kwargs were supplied.
    return self.applymap(
        lambda val: value if cond(val, **kwargs) else other,
        subset=subset)
def set_precision(self, precision):
    """
    Set the float precision used when rendering cell values.

    Parameters
    ----------
    precision : int

    Returns
    -------
    self : Styler
    """
    self.precision = precision
    return self
def set_table_attributes(self, attributes):
    """
    Set extra attributes emitted in the opening ``<table>`` tag, in
    addition to the automatic (by default) id.

    Parameters
    ----------
    attributes : string

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_attributes('class="pure-table"')
    # ... <table class="pure-table"> ...
    """
    self.table_attributes = attributes
    return self
def export(self):
    """
    Export the styles applied to the current Styler so they can be
    replayed on another one via ``Styler.use``.

    Returns
    -------
    styles : list

    See Also
    --------
    Styler.use
    """
    return self._todo
def use(self, styles):
    """
    Append styles to this Styler, typically ones obtained from
    ``Styler.export`` on another Styler.

    Parameters
    ----------
    styles : list
        List of style functions.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.export
    """
    self._todo.extend(styles)
    return self
def set_uuid(self, uuid):
    """
    Set the uuid used to namespace this Styler's CSS.

    Parameters
    ----------
    uuid : str

    Returns
    -------
    self : Styler
    """
    self.uuid = uuid
    return self
def set_caption(self, caption):
    """
    Set the caption rendered with the table.

    Parameters
    ----------
    caption : str

    Returns
    -------
    self : Styler
    """
    self.caption = caption
    return self
def set_table_styles(self, table_styles):
    """
    Set table-level CSS, emitted in a ``<style>`` tag before the
    generated HTML table.

    Parameters
    ----------
    table_styles : list
        Each entry is a dictionary with ``selector`` and ``props`` keys:
        ``selector`` is a CSS selector the style applies to
        (automatically prefixed by the table's UUID) and ``props`` is a
        list of ``(attribute, value)`` tuples.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_styles(
    ...     [{'selector': 'tr:hover',
    ...       'props': [('background-color', 'yellow')]}]
    ... )
    """
    self.table_styles = table_styles
    return self
def hide_index(self):
    """
    Omit the index when rendering.

    .. versionadded:: 0.23.0

    Returns
    -------
    self : Styler
    """
    self.hidden_index = True
    return self
def hide_columns(self, subset):
    """
    Exclude the selected columns from rendering.

    .. versionadded:: 0.23.0

    Parameters
    ----------
    subset : IndexSlice
        An argument to ``DataFrame.loc`` identifying which columns
        are hidden.

    Returns
    -------
    self : Styler
    """
    hidden = self.data.loc[_non_reducing_slice(subset)]
    # Store positional indices; _translate checks membership per column.
    self.hidden_columns = self.columns.get_indexer_for(hidden.columns)
    return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
    # CSS for one cell: colored background when the value is missing,
    # no style otherwise.
    if pd.isna(v):
        return 'background-color: {color}'.format(color=null_color)
    return ''
def highlight_null(self, null_color='red'):
    """
    Shade the background ``null_color`` for missing values.

    Parameters
    ----------
    null_color : str

    Returns
    -------
    self : Styler
    """
    # applymap already returns self, so the call can be returned directly.
    return self.applymap(self._highlight_null, null_color=null_color)
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                        subset=None, text_color_threshold=0.408):
    """
    Shade cell backgrounds along a matplotlib colormap according to the
    data in each column (``axis=0``) or row (``axis=1``).

    Requires matplotlib.

    Parameters
    ----------
    cmap : str or colormap
        matplotlib colormap.
    low, high : float
        Extend the normalization range by ``low * (max - min)`` /
        ``high * (max - min)``, compressing the color range so text
        stays legible.
    axis : int or str
        1 or 'columns' for columnwise, 0 or 'index' for rowwise.
    subset : IndexSlice
        A valid slice for ``data`` to limit the style application to;
        non-numeric columns are excluded automatically.
    text_color_threshold : float or int
        Luminance cutoff (0 to 1) that flips the text between dark and
        light for visibility. 0 = all text dark, 1 = all text light.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.
    """
    target = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._background_gradient, cmap=cmap, subset=target,
               axis=axis, low=low, high=high,
               text_color_threshold=text_color_threshold)
    return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.

    Returns a list (Series input) or DataFrame of CSS strings setting
    each cell's background color from ``cmap`` and a contrasting text
    color chosen by relative luminance.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)

    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            color : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            # BUG FIX: the gamma expansion is ((x + 0.055) / 1.055) ** 2.4
            # per the W3C definition. The previous expression
            # ((x + 0.055) / 1.055 ** 2.4) bound the exponent to 1.055
            # alone due to operator precedence, skewing the luminance and
            # therefore the dark/light text decision.
            r, g, b = (
                x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )

        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
def set_properties(self, subset=None, **kwargs):
    """
    Convenience method for setting one or more data-independent CSS
    properties on each cell.

    Parameters
    ----------
    subset : IndexSlice
        A valid slice for ``data`` to limit the style application to.
    kwargs : dict
        property: value pairs to be set for each cell.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    declarations = ';'.join('{p}: {v}'.format(p=p, v=v)
                            for p, v in kwargs.items())

    def constant_style(_):
        # Same CSS regardless of the cell value.
        return declarations

    return self.applymap(constant_style, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
    """
    Draw bar chart in dataframe cells.

    Returns a list (Series input) or DataFrame of CSS strings that
    paint a ``linear-gradient`` bar behind each cell, scaled to the
    data range and aligned per ``align``.
    """
    # Get input value range.
    smin = s.min() if vmin is None else vmin
    if isinstance(smin, ABCSeries):
        # DataFrame reduction (axis=None) yields a Series; reduce again.
        smin = smin.min()
    smax = s.max() if vmax is None else vmax
    if isinstance(smax, ABCSeries):
        smax = smax.max()
    if align == 'mid':
        smin = min(0, smin)
        smax = max(0, smax)
    elif align == 'zero':
        # For "zero" mode, we want the range to be symmetrical around zero.
        smax = max(abs(smin), abs(smax))
        smin = -smax
    # Transform to percent-range of linear-gradient.
    # The 1e-12 guards against division by zero for constant data.
    normed = width * (s.values - smin) / (smax - smin + 1e-12)
    zero = -width * smin / (smax - smin + 1e-12)

    def css_bar(start, end, color):
        """
        Generate CSS code to draw a bar from start to end.
        """
        css = 'width: 10em; height: 80%;'
        if end > start:
            css += 'background: linear-gradient(90deg,'
            if start > 0:
                css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                    s=start, c=color
                )
            css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                e=min(end, width), c=color,
            )
        return css

    def css(x):
        if pd.isna(x):
            return ''

        # avoid deprecated indexing `colors[x > zero]`
        color = colors[1] if x > zero else colors[0]

        if align == 'left':
            return css_bar(0, x, color)
        else:
            return css_bar(min(x, zero), max(x, zero), color)

    if s.ndim == 1:
        return [css(x) for x in normed]
    else:
        return pd.DataFrame(
            [[css(x) for x in row] for row in normed],
            index=s.index, columns=s.columns
        )
# MASKED: bar function (lines 1073-1141)
def highlight_max(self, subset=None, color='yellow', axis=0):
    """
    Shade the background of the maximum value(s).

    Parameters
    ----------
    subset : IndexSlice, default None
        A valid slice for ``data`` to limit the style application to.
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise.

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color,
                                   axis=axis, max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """
    Shade the background of the minimum value(s).

    Parameters
    ----------
    subset : IndexSlice, default None
        A valid slice for ``data`` to limit the style application to.
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for
        rowwise, or ``None`` for tablewise.

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset, color=color,
                                   axis=axis, max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
                       max_=True):
    # Shared implementation of highlight_max / highlight_min: restrict
    # to numeric cells, then defer to _highlight_extrema via apply.
    target = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._highlight_extrema, color=color, axis=axis,
               subset=target, max_=max_)
    return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
    """
    Highlight the min or max in a Series or DataFrame.
    """
    attr = 'background-color: {0}'.format(color)
    if data.ndim == 1:
        # Series from .apply with axis=0/1.
        target = data.max() if max_ else data.min()
        return [attr if hit else '' for hit in data == target]
    # DataFrame from .apply with axis=None.
    target = data.max().max() if max_ else data.min().min()
    return pd.DataFrame(np.where(data == target, attr, ''),
                        index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
    """
    Factory function for creating a subclass of ``Styler`` with a
    custom template and Jinja environment.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates.
    name : str
        Name of your custom template to use for rendering.

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    # Search the user's directories first, falling back to the
    # built-in pandas templates.
    loader = ChoiceLoader([FileSystemLoader(searchpath), cls.loader])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
def pipe(self, func, *args, **kwargs):
    """
    Apply ``func(self, *args, **kwargs)``, and return the result.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    func : function
        Function to apply to the Styler. Alternatively, a
        ``(callable, keyword)`` tuple where ``keyword`` is a string
        indicating the keyword of ``callable`` that expects the Styler.
    *args, **kwargs :
        Arguments passed to ``func``.

    Returns
    -------
    object :
        The value returned by ``func``.

    See Also
    --------
    DataFrame.pipe : Analogous method for DataFrame.
    Styler.apply : Apply a function row-wise, column-wise, or table-wise
        to modify the dataframe's styling.

    Notes
    -----
    Like :meth:`DataFrame.pipe`, this method can simplify the
    application of several user-defined functions to a styler. Instead
    of writing:

    .. code-block:: python

        f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)

    users can write:

    .. code-block:: python

        (df.style.set_precision(3)
           .pipe(g, arg1=a)
           .pipe(f, arg2=b, arg3=c))

    In particular, this allows users to define functions that take a
    styler object, along with other parameters, and return the styler
    after making styling changes (such as calling :meth:`Styler.apply`
    or :meth:`Styler.set_properties`). Using ``.pipe``, these
    user-defined style "transformations" can be interleaved with calls
    to the built-in Styler interface.

    Examples
    --------
    >>> def format_conversion(styler):
    ...     return (styler.set_properties(**{'text-align': 'right'})
    ...                   .format({'conversion': '{:.1%}'}))

    The user-defined ``format_conversion`` function above can be called
    within a sequence of other style modifications:

    >>> df = pd.DataFrame({'trial': list(range(5)),
    ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
    >>> (df.style
    ...    .highlight_min(subset=['conversion'], color='yellow')
    ...    .pipe(format_conversion)
    ...    .set_caption("Results with minimum conversion highlighted."))
    """
    return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, initial_position): span
    """
    # ``sentinel`` marks positions that index.format sparsified away
    # (i.e. repeats of the label above in a MultiIndex level).
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)

    if hidden_elements is None:
        hidden_elements = []

    lengths = {}
    if index.nlevels == 1:
        # Flat index: every visible label spans exactly one row.
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths

    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                # No sparsification: each cell is its own span of 1.
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # Visible start of a new span.
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # Sparsified continuation: extend the current span.
                lengths[(i, last_label)] += 1

    # Drop spans that ended up fully hidden (length 0).
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}

    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """
    Normalize a formatter to a callable: template strings are wrapped
    in ``str.format``, callables pass through, anything else raises.
    """
    if is_string_like(formatter):
        template = formatter
        return lambda x: template.format(x)
    if callable(formatter):
        return formatter
    raise TypeError("Expected a template string or callable, got "
                    "{formatter} instead".format(formatter=formatter))
|
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar charts in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to;
        non-numeric columns are excluded automatically.
    axis : int, str or None, default 0
        Apply per column (``0``/``'index'``), per row
        (``1``/``'columns'``), or to the whole DataFrame (``None``).
    color : str or 2-tuple/list
        One color for all bars, or ``[color_negative, color_positive]``
        (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 and 100. The largest value covers `width`
        percent of the cell's width.
    align : {'left', 'zero', 'mid'}, default 'left'
        How to align the bars with the cells:

        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or if all
          values share a sign the zero is aligned at the right (left)
          edge for negative (positive) data.

        .. versionadded:: 0.20.0
    vmin : float, optional
        Lower clip limit of the bar range; data minimum when None.

        .. versionadded:: 0.24.0
    vmax : float, optional
        Upper clip limit of the bar range; data maximum when None.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero',' mid'}")

    # Normalize ``color`` to a [negative, positive] pair.
    if not (is_list_like(color)):
        colors = [color, color]
    elif len(color) == 1:
        colors = [color[0], color[0]]
    elif len(color) > 2:
        raise ValueError("`color` must be string or a list-like"
                         " of length 2: [`color_neg`, `color_pos`]"
                         " (eg: color=['#d65f5f', '#5fba7d'])")
    else:
        colors = color

    target = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._bar, subset=target, axis=axis,
               align=align, colors=colors, width=width,
               vmin=vmin, vmax=vmax)
    return self
| 1,073
| 1,141
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
    # Yield the matplotlib pyplot/colors modules when matplotlib is
    # importable; otherwise raise an ImportError naming the Styler
    # method (``func``) that requires it.
    if has_mpl:
        yield plt, colors
    else:
        raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``evel<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None, cell_ids=True):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option('display.precision')
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns = []
self.cell_ids = cell_ids
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if is_float(x):
return '{:>.{precision}g}'.format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
axes='index, columns', klass='Styler',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and
com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * (len(clabels[0]) - len(hidden_columns)))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and
not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns)}
# only add an id if the cell has a style
if (self.cell_ids or
not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ''
if 'class="' in table_attr:
table_attr = table_attr.replace('class="',
'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
    """
    Format the text display value of cells.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    formatter : str, callable, or dict
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that restricts which elements
        ``formatter`` is applied to.

    Returns
    -------
    self : Styler

    Notes
    -----
    ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
    ``a`` is one of

    - str: this will be wrapped in: ``a.format(x)``
    - callable: called with the value of an individual cell

    The default display value for numeric values is the "general" (``g``)
    format with ``pd.options.display.precision`` precision.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    >>> df.style.format("{:.2%}")
    >>> df['c'] = ['a', 'b', 'c', 'd']
    >>> df.style.format({'c': str.upper})
    """
    if subset is None:
        row_locs = range(len(self.data))
        col_locs = range(len(self.data.columns))
    else:
        subset = _non_reducing_slice(subset)
        if len(subset) == 1:
            # row-only slice: pad with "all columns"
            subset = subset, self.data.columns

        sub_df = self.data.loc[subset]
        row_locs = self.data.index.get_indexer_for(sub_df.index)
        col_locs = self.data.columns.get_indexer_for(sub_df.columns)

    if is_dict_like(formatter):
        for col, col_formatter in formatter.items():
            # formatter must be callable, so '{}' are converted to lambdas
            col_formatter = _maybe_wrap_formatter(col_formatter)
            col_num = self.data.columns.get_indexer_for([col])[0]

            for row_num in row_locs:
                self._display_funcs[(row_num, col_num)] = col_formatter
    else:
        # single scalar to format all cells with.
        # FIX: wrap once, outside the loop -- the previous code re-called
        # _maybe_wrap_formatter for every single cell (loop-invariant work).
        formatter = _maybe_wrap_formatter(formatter)
        for i, j in product(row_locs, col_locs):
            self._display_funcs[(i, j)] = formatter
    return self
def render(self, **kwargs):
    """
    Render the built up styles to HTML.

    Parameters
    ----------
    `**kwargs` : Any additional keyword arguments are passed through
        to ``self.template.render``. This is useful when you need to
        provide additional variables for a custom template.

        .. versionadded:: 0.20

    Returns
    -------
    rendered : str
        The rendered HTML

    Notes
    -----
    ``Styler`` objects have defined the ``_repr_html_`` method
    which automatically calls ``self.render()`` when it's the
    last item in a Notebook cell. When calling ``Styler.render()``
    directly, wrap the result in ``IPython.display.HTML`` to view
    the rendered HTML in the notebook.

    Pandas uses the following keys in render. Arguments passed
    in ``**kwargs`` take precedence, so think carefully if you want
    to override them:

    * head
    * cellstyle
    * body
    * uuid
    * precision
    * table_styles
    * caption
    * table_attributes
    """
    # Run all queued style functions so self.ctx is populated.
    self._compute()
    # TODO: namespace all the pandas keys
    d = self._translate()
    # filter out empty styles, every cell will have a class
    # but the list of props may just be [['', '']].
    # so we have the nested anys below
    trimmed = [x for x in d['cellstyle']
               if any(any(y) for y in x['props'])]
    d['cellstyle'] = trimmed
    # caller-supplied kwargs take precedence over the computed keys
    d.update(kwargs)
    return self.template.render(**d)
def _update_ctx(self, attrs):
    """
    Update the state of the Styler.

    Collects a mapping of {index_label: ['<property>: <value>']}.

    attrs : Series or DataFrame
        should contain strings of '<property>: <value>;<prop2>: <val2>'
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
    """
    for row_label, v in attrs.iterrows():
        for col_label, col in v.iteritems():
            # Translate labels back into positional (row, col) keys,
            # which is how self.ctx is indexed.
            i = self.index.get_indexer([row_label])[0]
            j = self.columns.get_indexer([col_label])[0]
            # Split 'prop: val;prop2: val2' into individual declarations;
            # rstrip tolerates a trailing ';'.
            for pair in col.rstrip(";").split(";"):
                self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
    """Return a new Styler over the same data.

    ``ctx`` and ``_todo`` are deep-copied when ``deepcopy`` is True;
    otherwise the new Styler shares them with this one.

    NOTE(review): attributes such as hidden_index / hidden_columns and
    the per-cell display funcs are not carried over -- confirm intended.
    """
    styler = Styler(self.data, precision=self.precision,
                    caption=self.caption, uuid=self.uuid,
                    table_styles=self.table_styles)
    if deepcopy:
        styler.ctx = copy.deepcopy(self.ctx)
        styler._todo = copy.deepcopy(self._todo)
    else:
        # shallow: both objects mutate the same ctx/_todo containers
        styler.ctx = self.ctx
        styler._todo = self._todo
    return styler
def __copy__(self):
    """
    Shallow copy (used by ``copy.copy``): shares ``ctx`` and ``_todo``.
    """
    return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
    """Deep copy (used by ``copy.deepcopy``); ``memo`` is not consulted."""
    return self._copy(deepcopy=True)
def clear(self):
    """Remove every previously applied style from this Styler, in place.

    Returns None.
    """
    # Drop the pending style functions first, then flush the computed
    # per-cell CSS context.
    self._todo = []
    self.ctx.clear()
def _compute(self):
    """
    Execute the style functions built up in `self._todo`.

    Relies on the conventions that all style functions go through
    .apply or .applymap. They append styles to apply as tuples of
    (application method, *args, **kwargs).
    """
    r = self
    # func(self) resolves a bound method (e.g. self._apply), which
    # mutates self.ctx and returns self.
    for func, args, kwargs in self._todo:
        r = func(self)(*args, **kwargs)
    return r
def _apply(self, func, axis=0, subset=None, **kwargs):
    """Worker for ``apply``: run ``func`` over (a subset of) the data
    and merge the resulting CSS strings into ``self.ctx``.

    Returns self for chaining. Raises TypeError/ValueError when the
    result is not frame-shaped with matching labels.
    """
    subset = slice(None) if subset is None else subset
    subset = _non_reducing_slice(subset)
    data = self.data.loc[subset]
    if axis is not None:
        # row-/column-wise: 'expand' keeps the result frame-shaped
        result = data.apply(func, axis=axis,
                            result_type='expand', **kwargs)
        result.columns = data.columns
    else:
        # table-wise: func must itself return a like-labelled DataFrame
        result = func(data, **kwargs)
        if not isinstance(result, pd.DataFrame):
            raise TypeError(
                "Function {func!r} must return a DataFrame when "
                "passed to `Styler.apply` with axis=None"
                .format(func=func))
        if not (result.index.equals(data.index) and
                result.columns.equals(data.columns)):
            msg = ('Result of {func!r} must have identical index and '
                   'columns as the input'.format(func=func))
            raise ValueError(msg)

    result_shape = result.shape
    expected_shape = self.data.loc[subset].shape
    if result_shape != expected_shape:
        msg = ("Function {func!r} returned the wrong shape.\n"
               "Result has shape: {res}\n"
               "Expected shape: {expect}".format(func=func,
                                                 res=result.shape,
                                                 expect=expected_shape))
        raise ValueError(msg)
    self._update_ctx(result)
    return self
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a function column-wise, row-wise, or table-wise,
    updating the HTML representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a Series or DataFrame (depending
        on ``axis``), and return an object with the same shape.
        Must return a DataFrame with identical index and
        column labels when ``axis=None``
    axis : int, str or None
        apply to each column (``axis=0`` or ``'index'``)
        or to each row (``axis=1`` or ``'columns'``) or
        to the entire DataFrame at once with ``axis=None``
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    Notes
    -----
    The output shape of ``func`` should match the input, i.e. if
    ``x`` is the input row, column, or table (depending on ``axis``),
    then ``func(x).shape == x.shape`` should be true.

    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
    ...             for v in x]
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    # Execution is deferred: record how to look up the worker plus its
    # arguments; _compute() replays these entries at render time.
    pending = (lambda instance: getattr(instance, '_apply'),
               (func, axis, subset),
               kwargs)
    self._todo.append(pending)
    return self
def _applymap(self, func, subset=None, **kwargs):
    """Worker for ``applymap``: run ``func`` on every element of the
    (optionally sliced) data and merge the CSS into ``self.ctx``."""
    func = partial(func, **kwargs)  # applymap doesn't take kwargs?
    if subset is None:
        # default to the whole frame
        subset = pd.IndexSlice[:]
    subset = _non_reducing_slice(subset)
    result = self.data.loc[subset].applymap(func)
    self._update_ctx(result)
    return self
def applymap(self, func, subset=None, **kwargs):
    """
    Apply a function elementwise, updating the HTML
    representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a scalar and return a scalar
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    # Deferred like ``apply``: _compute() replays this at render time.
    pending = (lambda instance: getattr(instance, '_applymap'),
               (func, subset),
               kwargs)
    self._todo.append(pending)
    return self
def where(self, cond, value, other=None, subset=None, **kwargs):
    """
    Apply a function elementwise, updating the HTML
    representation with a style which is selected in
    accordance with the return value of a function.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    cond : callable
        ``cond`` should take a scalar and return a boolean
    value : str
        applied when ``cond`` returns true
    other : str
        applied when ``cond`` returns false
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``cond``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap
    """
    # Falsy branch defaults to "no style".
    fill = '' if other is None else other

    def _choose(val):
        return value if cond(val) else fill

    return self.applymap(_choose, subset=subset, **kwargs)
def set_precision(self, precision):
    """
    Set the precision used to render.

    Parameters
    ----------
    precision : int
        Number of digits used when formatting numeric cell values.

    Returns
    -------
    self : Styler
    """
    # Stored on the instance; consumed by _translate at render time.
    self.precision = precision
    return self
def set_table_attributes(self, attributes):
    """
    Set the table attributes.

    These are the items that show up in the opening ``<table>`` tag
    in addition to the automatic (by default) id.

    Parameters
    ----------
    attributes : string

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_attributes('class="pure-table"')
    # ... <table class="pure-table"> ...
    """
    # Raw attribute string, emitted verbatim into the <table> tag.
    self.table_attributes = attributes
    return self
def export(self):
    """
    Export the styles applied to the current Styler.

    Can be applied to a second style with ``Styler.use``.

    Returns
    -------
    styles : list
        The pending style functions themselves (the live list,
        not a copy).

    See Also
    --------
    Styler.use
    """
    return self._todo
def use(self, styles):
    """
    Set the styles on the current Styler, possibly using styles
    from ``Styler.export``.

    Parameters
    ----------
    styles : list
        list of style functions

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.export
    """
    # Append each imported style onto this Styler's pending queue.
    for style_fn in styles:
        self._todo.append(style_fn)
    return self
def set_uuid(self, uuid):
    """
    Set the uuid for a Styler.

    Parameters
    ----------
    uuid : str
        Identifier used to prefix the generated CSS selectors/ids.

    Returns
    -------
    self : Styler
    """
    self.uuid = uuid
    return self
def set_caption(self, caption):
    """
    Set the caption on a Styler

    Parameters
    ----------
    caption : str
        Text rendered in the table's <caption> element.

    Returns
    -------
    self : Styler
    """
    self.caption = caption
    return self
def set_table_styles(self, table_styles):
    """
    Set the table styles on a Styler.

    These are placed in a ``<style>`` tag before the generated HTML table.

    Parameters
    ----------
    table_styles : list
        Each individual table_style should be a dictionary with
        ``selector`` and ``props`` keys. ``selector`` should be a CSS
        selector that the style will be applied to (automatically
        prefixed by the table's UUID) and ``props`` should be a list of
        tuples with ``(attribute, value)``.

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_styles(
    ...     [{'selector': 'tr:hover',
    ...       'props': [('background-color', 'yellow')]}]
    ... )
    """
    # Stored as-is; serialized into a <style> tag by the template.
    self.table_styles = table_styles
    return self
def hide_index(self):
    """
    Hide any indices from rendering.

    .. versionadded:: 0.23.0

    Returns
    -------
    self : Styler
    """
    # _translate consults this flag when deciding <th> cell visibility.
    self.hidden_index = True
    return self
def hide_columns(self, subset):
    """
    Hide columns from rendering.

    .. versionadded:: 0.23.0

    Parameters
    ----------
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that identifies which columns
        are hidden.

    Returns
    -------
    self : Styler
    """
    # Resolve the label-based subset into positional column indices;
    # _translate marks those cells as not visible.
    subset = _non_reducing_slice(subset)
    hidden_df = self.data.loc[subset]
    self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
    return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
    """Return a background-color declaration when ``v`` is missing,
    otherwise an empty string (no style)."""
    if pd.isna(v):
        return 'background-color: {color}'.format(color=null_color)
    return ''
def highlight_null(self, null_color='red'):
    """
    Shade the background ``null_color`` for missing values.

    Parameters
    ----------
    null_color : str
        CSS color applied to NA cells.

    Returns
    -------
    self : Styler
    """
    # Register the elementwise NA highlighter; evaluation is deferred
    # until render time, and the fluent API is preserved.
    self.applymap(self._highlight_null, null_color=null_color)
    return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
                        subset=None, text_color_threshold=0.408):
    """
    Color the background in a gradient according to
    the data in each column (optionally row).

    Requires matplotlib.

    Parameters
    ----------
    cmap : str or colormap
        matplotlib colormap
    low, high : float
        compress the range by these values.
    axis : int or str
        1 or 'columns' for columnwise, 0 or 'index' for rowwise
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    text_color_threshold : float or int
        luminance threshold for determining text color. Facilitates text
        visibility across varying background colors. From 0 to 1.
        0 = all text is dark colored, 1 = all text is light colored.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.

    Notes
    -----
    Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
    text legible by not using the entire range of the color map. The range
    of the data is extended by ``low * (x.max() - x.min())`` and ``high *
    (x.max() - x.min())`` before normalizing.
    """
    # Restrict to numeric columns: only those can map onto a colormap.
    subset = _maybe_numeric_slice(self.data, subset)
    subset = _non_reducing_slice(subset)
    self.apply(self._background_gradient, cmap=cmap, subset=subset,
               axis=axis, low=low, high=high,
               text_color_threshold=text_color_threshold)
    return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.

    Parameters
    ----------
    s : Series or DataFrame
        Values to map onto the colormap.
    cmap : str or colormap
        matplotlib colormap.
    low, high : float
        Fractions by which the data range is extended before
        normalizing (compresses the effective color range).
    text_color_threshold : float or int
        Relative-luminance cutoff in [0, 1]; backgrounds darker than
        this get light text.

    Returns
    -------
    list of str or DataFrame
        CSS strings (background color plus contrast-aware text color).

    Raises
    ------
    ValueError
        If ``text_color_threshold`` is not a value from 0 to 1.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)

    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            rgba : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            # BUG FIX: gamma expansion is ((x + 0.055) / 1.055) ** 2.4.
            # The previous code computed (x + 0.055) / (1.055 ** 2.4)
            # because ** binds tighter than /, skewing the dark/light
            # text decision for most colors.
            r, g, b = (
                x / 12.92 if x <= 0.03928
                else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )

        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
def set_properties(self, subset=None, **kwargs):
    """
    Convenience method for setting one or more non-data dependent
    properties for each cell.

    Parameters
    ----------
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    kwargs : dict
        property: value pairs to be set for each cell

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    # Every cell receives the same constant CSS declaration string.
    css = ';'.join('{p}: {v}'.format(p=prop, v=val)
                   for prop, val in kwargs.items())
    return self.applymap(lambda _: css, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
    """
    Draw bar chart in dataframe cells.

    Returns a list (Series input) or DataFrame of CSS
    linear-gradient strings, one per cell.
    """
    # Get input value range.
    smin = s.min() if vmin is None else vmin
    if isinstance(smin, ABCSeries):
        smin = smin.min()
    smax = s.max() if vmax is None else vmax
    if isinstance(smax, ABCSeries):
        smax = smax.max()
    if align == 'mid':
        smin = min(0, smin)
        smax = max(0, smax)
    elif align == 'zero':
        # For "zero" mode, we want the range to be symmetrical around zero.
        smax = max(abs(smin), abs(smax))
        smin = -smax
    # Transform to percent-range of linear-gradient.
    # The 1e-12 guards against division by zero for constant data.
    normed = width * (s.values - smin) / (smax - smin + 1e-12)
    zero = -width * smin / (smax - smin + 1e-12)

    def css_bar(start, end, color):
        """
        Generate CSS code to draw a bar from start to end.
        """
        css = 'width: 10em; height: 80%;'
        if end > start:
            css += 'background: linear-gradient(90deg,'
            if start > 0:
                css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
                    s=start, c=color
                )
            css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
                e=min(end, width), c=color,
            )
        return css

    def css(x):
        if pd.isna(x):
            return ''

        # avoid deprecated indexing `colors[x > zero]`
        color = colors[1] if x > zero else colors[0]

        if align == 'left':
            return css_bar(0, x, color)
        else:
            # bar grows from the zero position toward the value
            return css_bar(min(x, zero), max(x, zero), color)

    if s.ndim == 1:
        return [css(x) for x in normed]
    else:
        return pd.DataFrame(
            [[css(x) for x in row] for row in normed],
            index=s.index, columns=s.columns
        )
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar chart in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : int, str or None, default 0
        Apply to each column (`axis=0` or `'index'`)
        or to each row (`axis=1` or `'columns'`) or
        to the entire DataFrame at once with `axis=None`.
    color : str or 2-tuple/list
        If a str is passed, the color is the same for both
        negative and positive numbers. If 2-tuple/list is used, the
        first element is the color_negative and the second is the
        color_positive (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 or 100. The largest value will cover `width`
        percent of the cell's width.
    align : {'left', 'zero',' mid'}, default 'left'
        How to align the bars with the cells.

        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or
          if values are all negative (positive) the zero is aligned
          at the right (left) of the cell.

        .. versionadded:: 0.20.0

    vmin : float, optional
        Minimum bar value, defining the left hand limit
        of the bar drawing range, lower values are clipped to `vmin`.
        When None (default): the minimum value of the data will be used.

        .. versionadded:: 0.24.0

    vmax : float, optional
        Maximum bar value, defining the right hand limit
        of the bar drawing range, higher values are clipped to `vmax`.
        When None (default): the maximum value of the data will be used.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero',' mid'}")

    # Normalize `color` to a [negative, positive] pair.
    if not (is_list_like(color)):
        color = [color, color]
    elif len(color) == 1:
        color = [color[0], color[0]]
    elif len(color) > 2:
        raise ValueError("`color` must be string or a list-like"
                         " of length 2: [`color_neg`, `color_pos`]"
                         " (eg: color=['#d65f5f', '#5fba7d'])")

    # Bars only make sense over numeric data.
    subset = _maybe_numeric_slice(self.data, subset)
    subset = _non_reducing_slice(subset)
    self.apply(self._bar, subset=subset, axis=axis,
               align=align, colors=color, width=width,
               vmin=vmin, vmax=vmax)

    return self
def highlight_max(self, subset=None, color='yellow', axis=0):
    """
    Highlight the maximum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
        or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    # Delegate to the shared extrema handler with max_=True.
    return self._highlight_handler(subset=subset,
                                   color=color,
                                   axis=axis,
                                   max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """
    Highlight the minimum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to
    color : str, default 'yellow'
    axis : int, str, or None; default 0
        0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
        or ``None`` for tablewise

    Returns
    -------
    self : Styler
    """
    # Delegate to the shared extrema handler with max_=False.
    return self._highlight_handler(subset=subset,
                                   color=color,
                                   axis=axis,
                                   max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
                       max_=True):
    """Shared driver for highlight_max / highlight_min: restrict the
    data to its numeric subset, queue the extrema highlighter, and
    return self for chaining."""
    subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
    self.apply(self._highlight_extrema, color=color, axis=axis,
               subset=subset, max_=max_)
    return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
    """
    Highlight the min or max in a Series or DataFrame.
    """
    attr = 'background-color: {0}'.format(color)
    if data.ndim == 1:  # Series from .apply(axis=0/1)
        if max_:
            extrema = data == data.max()
        else:
            extrema = data == data.min()
        return [attr if v else '' for v in extrema]
    else:  # DataFrame from .apply(axis=None)
        # double reduction: extrema over the whole table
        if max_:
            extrema = data == data.max().max()
        else:
            extrema = data == data.min().min()
        return pd.DataFrame(np.where(extrema, attr, ''),
                            index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
    """
    Factory function for creating a subclass of ``Styler``
    with a custom template and Jinja environment.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates
    name : str
        Name of your custom template to use for rendering

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    # Fall back to the class's own loader when the custom search path
    # does not provide the requested template.
    loader = ChoiceLoader([
        FileSystemLoader(searchpath),
        cls.loader,
    ])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
def pipe(self, func, *args, **kwargs):
    """
    Apply ``func(self, *args, **kwargs)``, and return the result.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    func : function
        Function to apply to the Styler. Alternatively, a
        ``(callable, keyword)`` tuple where ``keyword`` is a string
        indicating the keyword of ``callable`` that expects the Styler.
    *args, **kwargs :
        Arguments passed to `func`.

    Returns
    -------
    object :
        The value returned by ``func``.

    See Also
    --------
    DataFrame.pipe : Analogous method for DataFrame.
    Styler.apply : Apply a function row-wise, column-wise, or table-wise
        to modify the dataframe's styling.

    Notes
    -----
    Like :meth:`DataFrame.pipe`, this method can simplify the
    application of several user-defined functions to a styler. Instead
    of writing:

    .. code-block:: python

        f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)

    users can write:

    .. code-block:: python

        (df.style.set_precision(3)
           .pipe(g, arg1=a)
           .pipe(f, arg2=b, arg3=c))

    In particular, this allows users to define functions that take a
    styler object, along with other parameters, and return the styler
    after making styling changes (such as calling :meth:`Styler.apply`
    or :meth:`Styler.set_properties`). Using ``.pipe``, these
    user-defined style "transformations" can be interleaved with calls
    to the built-in Styler interface.

    Examples
    --------
    >>> def format_conversion(styler):
    ...     return (styler.set_properties(**{'text-align': 'right'})
    ...                   .format({'conversion': '{:.1%}'}))

    The user-defined ``format_conversion`` function above can be called
    within a sequence of other style modifications:

    >>> df = pd.DataFrame({'trial': list(range(5)),
    ...                    'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
    >>> (df.style
    ...    .highlight_min(subset=['conversion'], color='yellow')
    ...    .pipe(format_conversion)
    ...    .set_caption("Results with minimum conversion highlighted."))
    """
    # Shared pandas helper: also handles the (callable, keyword) form.
    return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
    """
    Return True when the cell at (idx_row, idx_col) starts a rendered
    span, i.e. its (level, position) key is present in ``lengths``.
    """
    # ``lengths`` is keyed (level, position) = (idx_col, idx_row).
    key = (idx_col, idx_row)
    return key in lengths
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, initial_position): span
    """
    # ``sentinel`` marks positions that index.format() left blank because
    # they continue the sparsified label above them.
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)

    if hidden_elements is None:
        hidden_elements = []

    lengths = {}
    if index.nlevels == 1:
        # flat index: every visible element spans exactly one row
        for i, value in enumerate(levels):
            if(i not in hidden_elements):
                lengths[(0, i)] = 1
        return lengths

    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                # start of a new, visible span
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if its hidden, keep track of it in case
                # length >1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif(j not in hidden_elements):
                # visible continuation of the current span
                lengths[(i, last_label)] += 1

    # spans reduced to zero (entirely hidden) are dropped
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1}

    return non_zero_lengths
def _maybe_wrap_formatter(formatter):
    """Coerce ``formatter`` into a callable.

    A template string is wrapped as ``lambda x: formatter.format(x)``;
    a callable is returned unchanged; anything else raises TypeError.
    """
    if is_string_like(formatter):
        return lambda x: formatter.format(x)
    if callable(formatter):
        return formatter
    msg = ("Expected a template string or callable, got {formatter} "
           "instead".format(formatter=formatter))
    raise TypeError(msg)
|
send_email
|
Sends an email to target email with given message.
Args:
message (str): message you're sending
|
import time
from bs4 import BeautifulSoup
import requests
import json
from datetime import datetime, timedelta
import psycopg2
import smtplib
import os
DATABASE = os.environ["DATABASE"]
USER = os.environ["USER"]
PASSWORD = os.environ["PASSWORD"]
HOST = os.environ["HOST"]
# MASKED: send_email function (lines 17-35)
def get_data() -> None:
    """
    Infinite loop of every 10min requests to Vilnius vaccination center.

    Collects count of vaccines and adds to PostgreSQL database.
    Sends an email if Pfizer vaccine is available.
    """
    while True:
        # Connect to DB (a fresh connection each cycle).
        sql_connection = psycopg2.connect(
            database=DATABASE, user=USER, password=PASSWORD, host=HOST
        )
        cur = sql_connection.cursor()
        headers = {
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "sec-ch-ua": "^\\^",
            "sec-ch-ua-mobile": "?0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Sec-Fetch-Site": "cross-site",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-User": "?1",
            "Sec-Fetch-Dest": "document",
            "Accept-Language": "en-US,en;q=0.9",
        }
        page = requests.get(
            "https://vilnius-vac.myhybridlab.com/selfregister/vaccine", headers=headers
        )
        soup = BeautifulSoup(page.content, "html.parser")
        vaccines = soup.find("vaccine-rooms", class_=None)[":vaccine-rooms"]
        json_object = json.loads(vaccines)

        # Time reported by the page, e.g. "... 2021-05-05 12:00:00".
        time_raw = soup.find("small", class_="text-muted").get_text().split()
        time_str = time_raw[2] + " " + time_raw[3]
        dt = datetime.fromisoformat(time_str)
        now = datetime.now().replace(microsecond=0)
        # assumes the site reports EET/EEST (UTC+3) -- TODO confirm offset
        eet_dt = now + timedelta(hours=3)
        diff_secs = (eet_dt - dt).seconds
        # Sleep until just after the site's next 10-minute refresh.
        # FIX: clamp at 0 -- time.sleep raises ValueError for negatives.
        total_sleep = max(0, 602 - diff_secs)

        moderna = json_object[0]["free_total"]
        pfizer = json_object[1]["free_total"]
        astra = json_object[2]["free_total"]
        janssen = json_object[3]["free_total"]

        # FIX: parameterized query instead of f-string interpolation --
        # never splice scraped values directly into SQL.
        cur.execute(
            "INSERT INTO vilnius_vakcinos (time, moderna, pfizer, astra_zeneca, janssen)"
            " VALUES (%s, %s, %s, %s, %s);",
            (time_str, moderna, pfizer, astra, janssen),
        )
        sql_connection.commit()
        sql_connection.close()

        if pfizer > 0:
            # FIX: the f prefix was missing, so the literal text
            # "{pfizer}" was emailed instead of the count.
            send_email(
                f"Pfizer count: {pfizer}, link to register: https://vilnius-vac.myhybridlab.com/selfregister/vaccine"
            )
        time.sleep(total_sleep)
# Entry point: start the infinite polling loop when run as a script.
if __name__ == "__main__":
    get_data()
|
def send_email(message: str) -> None:
    """
    Sends an email to target email with given message.

    Args:
        message (str): message you're sending
    """
    # FIX: json.load parses a file object; json.loads expects a string,
    # so the original raised TypeError before any mail was sent.
    with open("../creds.json", "r") as f:
        creds = json.load(f)
    gmail_user = creds["user"]
    gmail_pass = creds["pass"]
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.starttls()
        server.login(gmail_user, gmail_pass)
        server.sendmail(gmail_user, creds["target"], message)
        server.quit()
    except (smtplib.SMTPException, OSError):
        # Narrowed from a bare except so programming errors
        # (KeyError, NameError, ...) are no longer swallowed.
        print("Email didnt work...")
| 17
| 35
|
import time
from bs4 import BeautifulSoup
import requests
import json
from datetime import datetime, timedelta
import psycopg2
import smtplib
import os
DATABASE = os.environ["DATABASE"]
USER = os.environ["USER"]
PASSWORD = os.environ["PASSWORD"]
HOST = os.environ["HOST"]
def send_email(message: str) -> None:
    """
    Sends an email to target email with given message.

    Args:
        message (str): message you're sending
    """
    # FIX: json.load parses a file object; json.loads expects a string,
    # so the original raised TypeError before any mail was sent.
    with open("../creds.json", "r") as f:
        creds = json.load(f)
    gmail_user = creds["user"]
    gmail_pass = creds["pass"]
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.starttls()
        server.login(gmail_user, gmail_pass)
        server.sendmail(gmail_user, creds["target"], message)
        server.quit()
    except (smtplib.SMTPException, OSError):
        # Narrowed from a bare except so programming errors
        # (KeyError, NameError, ...) are no longer swallowed.
        print("Email didnt work...")
def get_data() -> None:
    """
    Infinite loop of every 10min requests to Vilnius vaccination center.

    Collects count of vaccines and adds to PostgreSQL database.
    Sends an email if Pfizer vaccine is available.
    """
    while True:
        # Connect to DB (a fresh connection each cycle).
        sql_connection = psycopg2.connect(
            database=DATABASE, user=USER, password=PASSWORD, host=HOST
        )
        cur = sql_connection.cursor()
        headers = {
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "sec-ch-ua": "^\\^",
            "sec-ch-ua-mobile": "?0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Sec-Fetch-Site": "cross-site",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-User": "?1",
            "Sec-Fetch-Dest": "document",
            "Accept-Language": "en-US,en;q=0.9",
        }
        page = requests.get(
            "https://vilnius-vac.myhybridlab.com/selfregister/vaccine", headers=headers
        )
        soup = BeautifulSoup(page.content, "html.parser")
        vaccines = soup.find("vaccine-rooms", class_=None)[":vaccine-rooms"]
        json_object = json.loads(vaccines)

        # Time reported by the page, e.g. "... 2021-05-05 12:00:00".
        time_raw = soup.find("small", class_="text-muted").get_text().split()
        time_str = time_raw[2] + " " + time_raw[3]
        dt = datetime.fromisoformat(time_str)
        now = datetime.now().replace(microsecond=0)
        # assumes the site reports EET/EEST (UTC+3) -- TODO confirm offset
        eet_dt = now + timedelta(hours=3)
        diff_secs = (eet_dt - dt).seconds
        # Sleep until just after the site's next 10-minute refresh.
        # FIX: clamp at 0 -- time.sleep raises ValueError for negatives.
        total_sleep = max(0, 602 - diff_secs)

        moderna = json_object[0]["free_total"]
        pfizer = json_object[1]["free_total"]
        astra = json_object[2]["free_total"]
        janssen = json_object[3]["free_total"]

        # FIX: parameterized query instead of f-string interpolation --
        # never splice scraped values directly into SQL.
        cur.execute(
            "INSERT INTO vilnius_vakcinos (time, moderna, pfizer, astra_zeneca, janssen)"
            " VALUES (%s, %s, %s, %s, %s);",
            (time_str, moderna, pfizer, astra, janssen),
        )
        sql_connection.commit()
        sql_connection.close()

        if pfizer > 0:
            # FIX: the f prefix was missing, so the literal text
            # "{pfizer}" was emailed instead of the count.
            send_email(
                f"Pfizer count: {pfizer}, link to register: https://vilnius-vac.myhybridlab.com/selfregister/vaccine"
            )
        time.sleep(total_sleep)
# Entry point: start the infinite polling loop when run as a script.
if __name__ == "__main__":
    get_data()
|
default_extractors
|
Returns the default extractors for use in ExtractAndEvaluate.
Args:
eval_shared_model: Shared model (single-model evaluation) or dict of shared
models keyed by model name (multi-model evaluation). Required unless the
predictions are provided alongside of the features (i.e. model-agnostic
evaluations).
eval_config: Eval config.
slice_spec: Deprecated (use EvalConfig).
desired_batch_size: Optional batch size for batching in Predict.
materialize: True to have extractors create materialized output.
Raises:
NotImplementedError: If eval_config contains mixed serving and eval models.
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Raises RuntimeError unless the installed TF is 1.15+ or any 2.x."""
  # Fail early with a clear error rather than at some arbitrary later point.
  major, minor, _ = tf.version.VERSION.split('.')
  major_num = int(major)
  unsupported = (major_num not in (1, 2) or
                 (major_num == 1 and int(minor) < 15))
  if unsupported:
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major_num == 2:
    # TF 2.x works but support was still considered beta at this point.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns a truthy value if legacy (V1) evaluation is being used.

  A legacy evaluation is one that uses only a single EvalSharedModel, has no
  tags (or uses "eval" as its tag), and does not specify an eval_config (or
  specifies one with no metrics). The legacy path builds a modified graph via
  add_metrics_callbacks on an EvalSavedModel; the newer path additionally
  supports MetricsSpecs in EvalConfig, serving models, and multi-model
  evaluation. This helper exists for backwards compatibility with callers
  that have not migrated to EvalConfig.

  Note: the return value is the raw short-circuit result (may be None/False
  rather than strictly bool); callers use it only in boolean context.
  """
  # Short-circuiting matters here: model_loader is only accessed when
  # eval_shared_model is a truthy non-dict value.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes the run metadata (config, inputs, models) to a JSON string."""
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations.

  Reads the eval_config.json written alongside the metrics/plots by a
  completed run. Falls back to the pre-JSON legacy format (a pickled dict in
  a TFRecord file without extension) when the JSON file is absent.
  """
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    # SECURITY NOTE: pickle.loads on file contents — only safe because the
    # file is expected to have been written by a trusted TFMA run.
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): the whole dict (not a version string) is passed here, so
    # this only checks that the dict is non-empty — confirm this is intended.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    # Translate the legacy EvalConfig fields into the proto-based EvalConfig.
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs recorded no file format and a single unnamed model.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The nesting levels correspond to output name, class id, metric name, and
# metric value, in that order. Note that MetricValue uses a oneof, so each
# metric-value dict contains exactly one key (the populated oneof field name)
# mapped to the actual metric value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Re-exported alias. Define types here to avoid type errors between OSS and
# internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Reads and deserializes the ValidationResult stored at the given path.

  Returns None when the file holds no records.
  """
  records = [
      ValidationResult.FromString(serialized) for serialized in
      tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if not records:
    return None
  # A validations file is expected to contain exactly one record.
  assert len(records) == 1
  return records[0]
class EvalResults(object):
  """Container for the results of multiple model analysis runs."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializes with a list of per-run results and an evaluation mode."""
    valid_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in valid_modes:
      raise ValueError('Mode ' + mode + ' must be one of ' + str(valid_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the list of EvalResult objects held by this container."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode (data-centric or model-centric)."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Bundles a list of evaluation results into a single EvalResults object.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of multiple completed TFMA runs into one EvalResults.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated
      together.

  Returns:
    An EvalResults containing the evaluation results serialized at
    output_paths. This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Builds an EvalResult from a completed run for the visualization APIs."""
  # Recover the run metadata written next to the metrics/plots.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # Without an explicit model name, fall back to the first recorded model.
  model_location = (
      list(model_locations.values())[0]
      if model_name is None else model_locations[model_name])
  return EvalResult(
      slicing_metrics=metrics,
      plots=plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. The passed list is not mutated.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to
      excessive memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    EvalSharedModel configured for the given model path and options.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]

  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    else:
      # BUGFIX: copy before appending below so the caller's list is not
      # mutated (previously the example count/weight callbacks leaked into
      # the list the caller passed in).
      add_metrics_callbacks = list(add_metrics_callbacks)
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr

  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
# MASKED: default_extractors function (lines 357-444)
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  """
  disabled_outputs = []
  if eval_config:
    # Defaults must be applied before disabled_outputs/model types are read.
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        # _replace returns a new namedtuple; the caller's model is untouched.
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Nothing to compute when both metrics and plots are disabled.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Deprecated keyword arguments are overridden by explicit config values.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # add_metrics_callbacks are only needed by the legacy (V1) serialization
  # path, which post-processes metric data via populate_stats_and_pop. The V1
  # MetricsAndPlotsEvaluator supports a single EvalSharedModel only, so a dict
  # input implies the V2 path, whose output needs no extra processing.
  callbacks = []
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    callbacks = eval_shared_model.add_metrics_callbacks

  output_paths = {
      output_key: os.path.join(output_path, output_key) for output_key in
      (constants.METRICS_KEY, constants.PLOTS_KEY, constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths, add_metrics_callbacks=callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts.

  Each raw input byte string becomes a single-entry Extracts dict keyed by
  INPUT_KEY; downstream extractors add further keys.
  """
  return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Extractors are applied sequentially; each evaluator runs at the pipeline
  point named by its run_after attribute (empty = before any extraction,
  LAST_EXTRACTOR_STAGE_NAME = after all extractors).
  """
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulate each evaluator's outputs per key; merged below.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Each extractor rebinds `extracts`, so evaluators keyed to a stage see the
  # output of that stage plus everything before it.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue

    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))

  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn that merges dictionaries emitted by different evaluators.

  Inputs are required to have pairwise-disjoint keys; the combined output is
  their union.
  """

  def create_accumulator(self) -> Dict[Text, Any]:
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Key views intersect to a plain set, so the error text matches the old
    # set(...) & set(...) formatting.
    overlapping = accumulator.keys() & output_dict.keys()
    if overlapping:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlapping)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for accumulator in accumulators:
      self._merge(merged, accumulator)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for results_writer in writers:
    _ = (evaluation_or_validation
         | results_writer.stage_name >> results_writer.ptransform)
  # All values share one pipeline; use the first to anchor the PDone.
  first_value = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(first_value.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize once up front; the pipeline then writes the single JSON string
  # as an unsharded text file next to the metrics/plots outputs.
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (pipeline
          | 'CreateEvalConfig' >> beam.Create([serialized_run])
          | 'WriteEvalConfig' >> beam.io.WriteToText(
              destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:
    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model
      accepts (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no
      writers are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples
      were read from. This is used only for display purposes - data will not
      actually be read from this path.
    display_only_file_format: Optional format of the examples. This is used
      only for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name ('' for the single-model case).
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}

  if eval_config is None:
    # Build an EvalConfig from the deprecated keyword arguments.
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)

  config.verify_eval_config(eval_config)

  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)

  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)

  # Each evaluator must reference a stage that an extractor provides.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)

  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)

  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))

  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    # Record run metadata (for load_eval_result) unless explicitly disabled.
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter

  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no
      writers are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()

  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  if eval_config is None:
    # Build an EvalConfig from the deprecated keyword arguments.
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)

  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))

    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter

  # The `with` block above waits for the pipeline; results are ready to load.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec. If None, only overall
      (unsliced) metrics are computed.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # Guard against slice_spec=None (the default): iterating None would raise
  # TypeError. No slicing specs means only the overall slice is evaluated.
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  per_model_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  per_dataset_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(per_dataset_results, constants.DATA_CENTRIC_MODE)
|
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Returns:
    A list of extractors; the last extractor is always a SliceKeyExtractor.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # Slicing specs from the EvalConfig override the deprecated slice_spec arg.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the "eval" signature: legacy EvalSavedModel path.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: use the V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # No model: model-agnostic evaluation where predictions are already
    # present in the input features.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
| 357
| 444
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  major_str, minor_str, _ = tf.version.VERSION.split('.')
  major = int(major_str)
  minor = int(minor_str)
  if major not in (1, 2) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used."""
  # A legacy evaluation uses a single EvalSharedModel (not a dict) whose model
  # loader either has no tags or carries the "eval" tag, and either no
  # eval_config or an eval_config without metrics_specs. Legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer evaluation path supports
  # add_metrics_callbacks as well as metrics defined in MetricsSpecs inside of
  # EvalConfig, works with both "eval" and serving models, and supports
  # multi-model evaluation. This check exists for backwards compatibility with
  # callers that have not updated to use the new EvalConfig.
  if not eval_shared_model or isinstance(eval_shared_model, dict):
    return False
  tags = eval_shared_model.model_loader.tags
  if tags and eval_constants.EVAL_TAG not in tags:
    return False
  return not eval_config or not eval_config.metrics_specs
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an eval run (config plus provenance) as a JSON string."""
  # Record the TFMA version alongside the config so readers can check
  # compatibility when deserializing.
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy format: a single pickled dict stored as the first record of a
    # TFRecord file.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): the whole dict (not a version string) is passed here;
    # _check_version only requires a truthy value, so this effectively just
    # checks the dict is non-empty -- confirm this is intended.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    # Map the legacy config fields onto the current EvalConfig proto.
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The nesting levels correspond to output name, class id, metric name and
# metric value, in this order. Note MetricValue uses oneof so metric values
# will always contain only a single key representing the type in the oneof and
# the actual metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
# Alias for the validation result proto produced by a validation run.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult.

  Args:
    validations_file: Path to the TFRecord file containing the serialized
      validation result.

  Returns:
    The deserialized ValidationResult, or None if the file holds no records.

  Raises:
    ValueError: If the file contains more than one validation record.
  """
  validation_records = []
  for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):
    validation_records.append(ValidationResult.FromString(record))
  if not validation_records:
    return None
  # A validation run writes exactly one record; more than one indicates a
  # corrupt or mismatched file. Raise instead of assert so the check is not
  # stripped when running under python -O.
  if len(validation_records) > 1:
    raise ValueError(
        'expected a single validation record in %s, found %d' %
        (validations_file, len(validation_records)))
  return validation_records[0]
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    # Only data-centric and model-centric modes are accepted; the UNKNOWN
    # default is deliberately rejected so callers must choose one.
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       str(supported_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the individual evaluation results."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode (data-centric or model-centric)."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Bundles a list of evaluation results into a single EvalResults.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the evaluation results serialized at multiple output paths.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  loaded_results = []
  for output_path in output_paths:
    loaded_results.append(load_eval_result(output_path, model_name=model_name))
  return make_eval_results(loaded_results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions."""
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics_path = os.path.join(output_path, constants.METRICS_KEY)
  plots_path = os.path.join(output_path, constants.PLOTS_KEY)
  metrics_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=metrics_path, model_name=model_name))
  plots_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=plots_path))
  if model_name is None:
    # Single-model run: take the (only) recorded model location.
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics_proto_list,
      plots=plots_proto_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. Note: when the "eval" tag is used
      this list is appended to in place.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel with a ModelLoader configured for the chosen tags.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      # No eval_config: assume the legacy EvalSavedModel flow.
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one example_weight metric per output name.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Returns:
    A list of extractors; the last extractor is always a SliceKeyExtractor.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # Slicing specs from the EvalConfig override the deprecated slice_spec arg.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the "eval" signature: legacy EvalSavedModel path.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: use the V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # No model: model-agnostic evaluation where predictions are already
    # present in the input features.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    A list with a single MetricsAndPlotsEvaluator (legacy V1 or V2 depending
    on the model/config), or an empty list when both metrics and plots
    outputs are disabled in the eval_config options.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Everything this evaluator would produce is disabled, so run nothing.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Deprecated keyword args are overridden by eval_config options when set.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  else:
    add_metric_callbacks = []
  output_paths = {
      key: os.path.join(output_path, key)
      for key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                  constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""
  # Each raw input becomes a singleton extracts dict keyed by INPUT_KEY;
  # downstream extractors add further entries to this dict.
  return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Args:
    extracts: PCollection of extracts.
    extractors: Extractors to apply, in order.
    evaluators: Evaluators to run; each evaluator's run_after field determines
      after which extractor stage it is applied.

  Returns:
    An Evaluation dict mapping output keys to PCollections.
  """
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    """Appends each value in new_evaluation to evaluation's list for its key."""
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply extractors in order, running each evaluator right after the
  # extractor stage it declares via run_after.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Finally, run evaluators that want the fully-extracted output.
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    """Returns an empty dict accumulator."""
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    """Merges output_dict into accumulator in place.

    Raises:
      ValueError: If the two dicts share any keys (evaluator outputs written
        to the same evaluation key must be disjoint).
    """
    intersection = set(accumulator) & set(output_dict)
    if intersection:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % intersection)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    """Folds one evaluator's output dict into the accumulator."""
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    """Merges partial accumulators into a single dict."""
    result = self.create_accumulator()
    for acc in accumulators:
      self._merge(result, acc)
    return result

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    """Returns the accumulated dict unchanged."""
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for w in writers:
    _ = evaluation_or_validation | w.stage_name >> w.ptransform
  # All the output PCollections share one pipeline, so PDone can be derived
  # from any of them.
  return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # shard_name_template='' writes a single unsharded file at the exact path.
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([
          _serialize_eval_run(eval_config, data_location, file_format,
                              model_locations)
      ])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:

    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name; '' denotes the single default
  # model in the non-dict case.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}

  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # standalone arguments (slice_spec, write_config, etc).
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)

  config.verify_eval_config(eval_config)

  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)

  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)

  # Each evaluator must be preceded by the extractor it depends on.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)

  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)

  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))

  # Optionally record run metadata (config, data/model locations) alongside
  # the results unless the eval-config output was explicitly disabled.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter

  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()

  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # standalone arguments (mirrors ExtractEvaluateAndWriteResults).
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)

  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))

    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter

  # Single-model runs return one EvalResult; multi-model runs return an
  # EvalResults, one entry per model spec.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # Bug fix: slice_spec defaults to None, so iterating it unconditionally
  # raised TypeError whenever no slicing specs were supplied. Fall back to
  # None (overall slice only) in that case.
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)

  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # Evaluate each model against the shared data set, preserving input order.
  analyses = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(analyses, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # Evaluate the one model against each data set, preserving input order.
  analyses = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(analyses, constants.DATA_CENTRIC_MODE)
|
default_evaluators
|
Returns the default evaluators for use in ExtractAndEvaluate.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if there are metrics to be computed in-graph using the model.
eval_config: Eval config.
compute_confidence_intervals: Deprecated (use eval_config).
k_anonymization_count: Deprecated (use eval_config).
desired_batch_size: Optional batch size for batching in combiner.
serialize: Deprecated.
random_seed_for_testing: Provide for deterministic tests only.
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  # Accepts 1.15+ and any 2.x; rejects everything else (0.x, 1.<15, 3+).
  major, minor, _ = tf.version.VERSION.split('.')
  if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if int(major) == 2:
    # TF 2.x is allowed but support is still in beta, so warn rather than fail.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used."""
  # A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  # has no tags (or uses "eval" as its tag), and does not specify an eval_config
  # (or specifies an eval_config with no metrics). The legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer version of evaluation supports
  # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  # of EvalConfig. The newer version works with both "eval" and serving models
  # and also supports multi-model evaluation. This function is used by code to
  # support backwards compatibility for callers that have not updated to use the
  # new EvalConfig.
  # Note: a dict-valued eval_shared_model (multi-model) is never legacy.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto (config plus run metadata) to a JSON string."""
  return json_format.MessageToJson(
      config_pb2.EvalRun(
          eval_config=eval_config,
          version=tfma_version.VERSION_STRING,
          data_location=data_location,
          file_format=file_format,
          model_locations=model_locations))
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy format: a single pickled dict stored in a TFRecord file.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs have no recorded file format and a single unnamed model.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])

# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult."""
  records = [
      ValidationResult.FromString(raw)
      for raw in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if records:
    # At most one ValidationResult is ever written per output file.
    assert len(records) == 1
    return records[0]
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializes EvalResults.

    Args:
      results: List of individual EvalResult objects.
      mode: One of constants.DATA_CENTRIC_MODE or constants.MODEL_CENTRIC_MODE.

    Raises:
      ValueError: If mode is not one of the supported modes.
    """
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      # Text is an alias for str, so this stringifies the supported list.
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       Text(supported_modes))

    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the list of EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode (data-centric or model-centric)."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Builds an EvalResults wrapper from a list of evaluation results.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads results for multiple completed TFMA runs into one EvalResults.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions."""
  # Recover the run metadata (config, data/model locations) written alongside
  # the metrics, then deserialize the metrics and plots themselves.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))

  if model_name is None:
    # Single-model run: use the first (only) recorded model location.
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics_proto_list,
      plots=plots_proto_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel configured for the given saved model.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      # No config to consult: assume the legacy EvalSavedModel format.
      tags = [eval_constants.EVAL_TAG]

  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr

  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # EvalConfig slicing specs take precedence over the deprecated slice_spec.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the legacy "eval" signature: V1 predict extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: predictions already present in the input.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Both metrics and plots output was disabled: nothing to evaluate.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # The deprecated standalone args act as fallbacks; explicit EvalConfig
      # options take precedence when set.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            random_seed_for_testing=random_seed_for_testing)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metrics_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # Only the legacy (V1) MetricsAndPlotsEvaluator output needs this post
  # processing, and the V1 path supports a single EvalSharedModel only, so the
  # callbacks are forwarded only when a single (non-dict) model is given.
  callbacks = []
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    callbacks = eval_shared_model.add_metrics_callbacks
  # One sub-directory under output_path per kind of output.
  output_paths = {
      key: os.path.join(output_path, key)
      for key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                  constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts.

  Each raw serialized input becomes a single-entry Extracts dict stored under
  constants.INPUT_KEY, which downstream extractors then add to.
  """
  return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Args:
    extracts: PCollection of Extracts.
    extractors: Extractors applied in order; each extractor's output feeds the
      next.
    evaluators: Evaluators to run. An evaluator's run_after names the extractor
      stage after which it should run (falsy means before any extraction).

  Returns:
    Evaluation dict of PCollections keyed by output name. When multiple
    evaluators produce the same key, their outputs are flattened and combined
    (each value must then be a dict, with disjoint keys across evaluators).
  """
  # evaluation[k] = list of values for k
  evaluation = {}
  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Append each output of new_evaluation onto the multi-valued dict.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation
  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply extractors in order; after each stage, run any evaluator scheduled
  # to run after that specific stage.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Evaluators scheduled after the final extractor run on the fully
  # extracted PCollection.
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    # Start from an empty mapping; evaluator outputs are merged into it.
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Every key must come from exactly one evaluator; overlapping keys are an
    # error because the final result is the disjoint union.
    overlapping = accumulator.keys() & output_dict.keys()
    if overlapping:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlapping)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    # Multi-evaluator outputs must be dicts so they can be unioned.
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial in accumulators:
      self._merge(merged, partial)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writers to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for w in writers:
    _ = evaluation_or_validation | w.stage_name >> w.ptransform
  # All values share the same pipeline, so any one of them can supply the
  # pipeline for the terminal PDone.
  return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # A single JSON-serialized EvalRun record is written without sharding
  # (shard_name_template='') so readers find it at a fixed file name.
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([
          _serialize_eval_run(eval_config, data_location, file_format,
                              model_locations)
      ])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:

    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name ('' for the single-model case).
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments (slice_spec, write_config, etc.).
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  # Fill in any component lists the caller did not supply.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  # Also record the run metadata (config, data/model locations) unless the
  # config file output has been explicitly disabled.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments (slice_spec, write_config, etc.). This mirrors the
    # logic in ExtractEvaluateAndWriteResults.
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  # Run the evaluation pipeline; results are written to output_path and then
  # loaded back below once the pipeline context exits (i.e. the run finishes).
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # Single-model runs return one EvalResult; multi-model runs return an
  # EvalResults with one entry per model spec.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # Bug fix: slice_spec defaults to None, but the previous code iterated it
  # unconditionally, raising TypeError when the argument was omitted. Guard it
  # the same way the other functions in this module do.
  slicing_specs = None
  if slice_spec:
    slicing_specs = [s.to_proto() for s in slice_spec]
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)

  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # Evaluate each model in turn; the comprehension preserves input order.
  all_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(all_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # Evaluate against each data set in turn; order follows data_locations.
  all_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(all_results, constants.DATA_CENTRIC_MODE)
|
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    A list containing the single applicable metrics/plots evaluator, or an
    empty list when both metrics and plots outputs are disabled.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Nothing to compute when both output kinds are disabled.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    # Values set explicitly in eval_config.options take precedence over the
    # deprecated keyword arguments.
    if eval_config is not None:
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
| 447
| 509
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  major, minor, _ = tf.version.VERSION.split('.')
  major_num = int(major)
  incompatible = (major_num not in (1, 2) or
                  (major_num == 1 and int(minor) < 15))
  if incompatible:
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major_num == 2:
    # TF 2.x works but support is still flagged as beta.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used."""
  # A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  # has no tags (or uses "eval" as its tag), and does not specify an eval_config
  # (or specifies an eval_config with no metrics). The legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer version of evaluation supports
  # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  # of EvalConfig. The newer version works with both "eval" and serving models
  # and also supports multi-model evaluation. This function is used by code to
  # support backwards compatibility for callers that have not updated to use the
  # new EvalConfig.
  # NOTE: because this is a chained and/or expression, the falsy return value
  # may be None or another falsy object rather than the literal False; callers
  # only rely on its truthiness.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto (config plus run metadata) to JSON text."""
  # The TFMA version is recorded so readers can validate compatibility via
  # _check_version when the run is loaded back.
  return json_format.MessageToJson(
      config_pb2.EvalRun(
          eval_config=eval_config,
          version=tfma_version.VERSION_STRING,
          data_location=data_location,
          file_format=file_format,
          model_locations=model_locations))
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: JSON-serialized EvalRun proto written by WriteEvalConfig.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): _check_version expects a version string but receives the
    # whole deserialized dict here; a non-empty dict passes the check
    # vacuously. Presumably a version key should be extracted first — confirm
    # against the legacy serialization format before changing.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs have a single unnamed model and no recorded file format.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The levels correspond to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
# ValidationResult aliases the proto message used by load_validation_result.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult.

  Returns None when the file contains no records.
  """
  records = [
      ValidationResult.FromString(serialized) for serialized in
      tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if not records:
    return None
  # A validations file is expected to hold exactly one result record.
  assert len(records) == 1
  return records[0]
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializes EvalResults.

    Args:
      results: The per-run EvalResult values, in order.
      mode: One of constants.DATA_CENTRIC_MODE or constants.MODEL_CENTRIC_MODE.

    Raises:
      ValueError: If mode is not one of the supported modes.
    """
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      # Fix: the original called typing.Text (a str alias) as a constructor;
      # use str() explicitly. The produced message is identical.
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       str(supported_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the stored list of EvalResult values."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode this collection was built with."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Bundles a list of TFMA evaluation results into a single EvalResults.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads results from multiple completed TFMA runs into one EvalResults.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  loaded = []
  for path in output_paths:
    loaded.append(load_eval_result(path, model_name=model_name))
  return make_eval_results(loaded, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions."""
  # Recover the run-level metadata written next to the metrics/plots.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics = metrics_and_plots_serialization.load_and_deserialize_metrics(
      path=os.path.join(output_path, constants.METRICS_KEY),
      model_name=model_name)
  plots = metrics_and_plots_serialization.load_and_deserialize_plots(
      path=os.path.join(output_path, constants.PLOTS_KEY))
  if model_name is None:
    # Single-model run: use the only recorded model location.
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics,
      plots=plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. The caller's list is not modified.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to
      excessive memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel configured for the given model path and options.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    # Copy before appending so the caller's list is never mutated (previously
    # the callbacks below were appended directly to the caller's list).
    add_metrics_callbacks = list(add_metrics_callbacks or [])
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # Slicing specs from the config override the deprecated slice_spec arg.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      # Mixing TFLite and non-TFLite models is unsupported.
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the eval signature: legacy (V1) predict extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # Mixing eval and serving signatures is unsupported.
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: use the V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: predictions are expected to already be
    # present in the extracts, so no predict extractor is added.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Both outputs this evaluator produces are disabled: nothing to do.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Config-provided values take precedence over the deprecated args.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  add_metric_callbacks = []
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  # Each output kind is written to a sub-path named after its key.
  output_paths = {
      output_key: os.path.join(output_path, output_key)
      for output_key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                         constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""

  def to_extracts(serialized_input):
    # Wrap each raw input under the standard input key.
    return {constants.INPUT_KEY: serialized_input}

  return inputs | beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Each evaluator is run on the extracts as they exist immediately after the
  extractor named by its run_after attribute (or before any extraction if
  run_after is unset).
  """
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulate each evaluator's output per key; a key may collect multiple
    # values when several evaluators write to it (merged below).
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn that merges dictionaries generated by different evaluators.

  Merging fails loudly if two evaluators produced the same key, since the
  combined output is expected to be a disjoint union.
  """

  def create_accumulator(self) -> Dict[Text, Any]:
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Overlapping keys indicate two evaluators wrote the same output.
    overlapping_keys = set(accumulator) & set(output_dict)
    if overlapping_keys:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlapping_keys)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial in accumulators:
      self._merge(merged, partial)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for output_writer in writers:
    _ = (
        evaluation_or_validation
        | output_writer.stage_name >> output_writer.ptransform)
  # All values share the same pipeline; take it from an arbitrary one.
  any_output = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(any_output.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize the run metadata once; it is emitted as a single element.
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  config_file = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([serialized_run])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          config_file, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:

    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model
      accepts (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no
      writers are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples
      were read from. This is used only for display purposes - data will not
      actually be read from this path.
    display_only_file_format: Optional format of the examples. This is used
      only for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name; single models get the key ''.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  if eval_config is None:
    # Build an EvalConfig from the deprecated individual arguments.
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  for v in evaluators:
    # Each evaluator's run_after must name one of the extractors.
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    # Also record the eval run metadata (config, data/model locations) so
    # load_eval_result can reconstruct the run for display.
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no
      writers are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  # Get output_path ready (temporary dir if caller did not provide one).
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  if eval_config is None:
    # Build an EvalConfig from the deprecated individual arguments.
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # Pipeline has run to completion here; load the serialized results back.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # Guard against slice_spec=None (the default): previously this crashed with
  # a TypeError when iterating None.
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis()
      for details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as model_locations.
  """
  # One single-model run per model, all against the same data set.
  all_results = [
      single_model_analysis(model_location, data_location, **kwargs)
      for model_location in model_locations
  ]
  return EvalResults(all_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis()
      for details. (kwargs are forwarded to single_model_analysis, not
      run_model_analysis as previously documented.)

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as data_locations.
  """
  results = []
  for d in data_locations:
    results.append(single_model_analysis(model_location, d, **kwargs))
  return EvalResults(results, constants.DATA_CENTRIC_MODE)
|
default_writers
|
Returns the default writers for use in WriteResults.
Args:
output_path: Output path.
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if legacy add_metrics_callbacks are used.
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
# Name of the JSON file (written under the output directory) that stores the
# serialized EvalRun describing an evaluation.
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  major_str, minor_str, _ = tf.version.VERSION.split('.')
  major = int(major_str)
  minor = int(minor_str)
  if major not in (1, 2) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    # TF 2.x is accepted but support is still maturing; warn rather than fail.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used."""
  # A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  # has no tags (or uses "eval" as its tag), and does not specify an eval_config
  # (or specifies an eval_config with no metrics). The legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer version of evaluation supports
  # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  # of EvalConfig. The newer version works with both "eval" and serving models
  # and also supports multi-model evaluation. This function is used by code to
  # support backwards compatibility for callers that have not updated to use the
  # new EvalConfig.
  if not eval_shared_model or isinstance(eval_shared_model, dict):
    return False
  tags = eval_shared_model.model_loader.tags
  uses_eval_tag = not tags or eval_constants.EVAL_TAG in tags
  has_metrics_specs = bool(eval_config and eval_config.metrics_specs)
  return uses_eval_tag and not has_metrics_specs
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this evaluation run to JSON."""
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations.

  Reads the serialized EvalRun stored under output_path. Falls back to the
  legacy pickled tf.Record format when the JSON config file is absent.
  """
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy support (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): _check_version expects a version string but is passed the
    # whole deserialized dict here, so any non-empty dict passes the check.
    # Confirm whether a version entry of final_dict was intended instead.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    # Translate the legacy config fields into the new EvalConfig proto form.
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs recorded no file format and a single unnamed ('') model.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The levels correspond to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])

# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult."""
  validation_records = [
      ValidationResult.FromString(record)
      for record in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if not validation_records:
    # No records present: return None (matches Optional return contract).
    return None
  # A validations file is expected to hold exactly one serialized result.
  assert len(validation_records) == 1
  return validation_records[0]
class EvalResults(object):
  """Class for results from multiple model analysis runs.

  Holds a list of EvalResult objects along with the mode
  (tfma.DATA_CENTRIC_MODE or tfma.MODEL_CENTRIC_MODE) they were produced in.
  """

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializes EvalResults.

    Args:
      results: A list of TFMA evaluation results.
      mode: One of tfma.DATA_CENTRIC_MODE or tfma.MODEL_CENTRIC_MODE.

    Raises:
      ValueError: If mode is not one of the supported modes.
    """
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      # Fix: the original called typing.Text (a type alias for annotations)
      # as a constructor; str() produces the identical message.
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       str(supported_modes))

    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the list of EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode the results were produced in."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Wraps a list of evaluation results into a single EvalResults object.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of multiple completed model analysis runs.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Output directory of a completed tfma run.
    model_name: Optional name of the model when multiple models were evaluated
      together; selects which model's metrics and location to load.

  Returns:
    An EvalResult with the deserialized metrics, plots, and run metadata.
  """
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))

  if model_name is None:
    # Single-model run: take the only (first) recorded model location.
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics_proto_list,
      plots=plots_proto_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    An EvalSharedModel whose ModelLoader is configured with the resolved tags
    and (for legacy 'eval'-tagged models) the add_metrics_callbacks, including
    automatically added example count/weight callbacks.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      # No config available: assume the legacy EvalSavedModel format.
      tags = [eval_constants.EVAL_TAG]

  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one weight metric per named output.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr

  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Returns:
    List of extractors: an input/predict extractor (chosen based on the model
    type) followed by the slice key extractor.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # Slicing specs from the EvalConfig override the deprecated slice_spec arg.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the legacy "eval" signature.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: use the v2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: predictions are expected in the input.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    A single-element list with either the legacy (v1) or new (v2)
    MetricsAndPlotsEvaluator, or an empty list when both metrics and plots
    outputs are disabled.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Nothing to evaluate when both metrics and plots are disabled.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Deprecated args are only honored when the EvalConfig doesn't set them.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.

  Returns:
    A single-element list containing a MetricsPlotsAndValidationsWriter
    configured to write metrics, plots, and validations under output_path.
  """
  add_metric_callbacks = []
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # This is only required for the legacy add_metrics_callbacks path, which
  # supports a single EvalSharedModel, so callbacks are only picked up when a
  # non-dict model is passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks

  # One output file per result kind, all under the run's output directory.
  output_paths = {
      constants.METRICS_KEY: os.path.join(output_path, constants.METRICS_KEY),
      constants.PLOTS_KEY: os.path.join(output_path, constants.PLOTS_KEY),
      constants.VALIDATIONS_KEY: os.path.join(output_path,
                                              constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""

  def to_extracts(serialized_input):
    # Each raw input becomes a singleton extracts dict keyed by INPUT_KEY.
    return {constants.INPUT_KEY: serialized_input}

  return inputs | beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Each evaluator runs either before any extraction (empty run_after), after
  the extractor named by its run_after, or after the last extractor. Keys
  written by multiple evaluators are merged at the end.
  """
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Appends each key's value so multiple evaluators can share output keys.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue

    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))

  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    """Returns an empty dict accumulator."""
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    """Merges output_dict into accumulator in place; keys must be disjoint."""
    intersection = set(accumulator) & set(output_dict)
    if intersection:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % intersection)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    """Adds one evaluator's output dict to the accumulator."""
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    """Merges per-bundle accumulators into one dict."""
    result = self.create_accumulator()
    for acc in accumulators:
      self._merge(result, acc)
    return result

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    """Returns the merged dict as the combined output."""
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  # Each writer consumes the output dict purely for its side effects.
  for output_writer in writers:
    _ = (evaluation_or_validation
         | output_writer.stage_name >> output_writer.ptransform)
  # Any value PCollection can supply the pipeline handle for PDone.
  first_pcollection = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(first_pcollection.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize once up front; the pipeline just materializes a single element.
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([serialized_run])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:
    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name ('' for single-model evaluation).
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}

  if eval_config is None:
    # Build an EvalConfig from the deprecated standalone arguments.
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)

  config.verify_eval_config(eval_config)

  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)

  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)

  # Fail fast if an evaluator's run_after doesn't match any extractor.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)

  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)

  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))

  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    # Record run metadata (display-only locations default to placeholders).
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter

  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions, or an
    EvalResults (model-centric mode) when the EvalConfig holds more than one
    ModelSpec.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # Backwards compatibility: synthesize an EvalConfig from the deprecated
  # keyword arguments (slice_spec, write_config, etc.) when none is given.
  if eval_config is None:
    model_specs = []
    # Normalize to a dict keyed by model name; '' is the single-model key.
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  # Run the whole evaluation inside a (usually local) Beam pipeline; results
  # are written to output_path and read back after the pipeline completes.
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # Single-model runs return one EvalResult; multi-model runs return an
  # EvalResults wrapping one EvalResult per ModelSpec.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # Bug fix: slice_spec defaults to None, and the previous code iterated it
  # unconditionally, raising TypeError for the default call. Only build
  # slicing_specs when slice specs were actually provided (matching the
  # equivalent guard in run_model_analysis).
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # One single_model_analysis run per model, collected in input order.
  per_model_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # One single_model_analysis run per data set, collected in input order.
  per_dataset_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(per_dataset_results, constants.DATA_CENTRIC_MODE)
|
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  NOTE(review): an identical `default_writers` definition appears again later
  in this module; Python keeps the last binding, so confirm which copy is
  intended and remove the other.

  Args:
    output_path: Output path. NOTE(review): annotated Optional, but it is
      passed to os.path.join unconditionally, which raises for None — confirm
      callers always supply a path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  add_metric_callbacks = []
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  # One sub-directory per output kind (metrics, plots, validations).
  output_paths = {
      constants.METRICS_KEY:
          os.path.join(output_path, constants.METRICS_KEY),
      constants.PLOTS_KEY:
          os.path.join(output_path, constants.PLOTS_KEY),
      constants.VALIDATIONS_KEY:
          os.path.join(output_path, constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
| 512
| 549
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  major_str, minor_str, _ = tf.version.VERSION.split('.')
  major = int(major_str)
  minor = int(minor_str)
  if major not in (1, 2) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
eval_config: Optional[config.EvalConfig]):
"""Returns True if legacy evaluation is being used."""
# A legacy evaluation is an evalution that uses only a single EvalSharedModel,
# has no tags (or uses "eval" as its tag), and does not specify an eval_config
# (or specifies an eval_config with no metrics). The legacy evaluation is
# based on using add_metrics_callbacks to create a modified version of the
# graph saved with an EvalSavedModel. The newer version of evaluation supports
# both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
# of EvalConfig. The newer version works with both "eval" and serving models
# and also supports multi-model evaluation. This function is used by code to
# support backwards compatibility for callers that have not updated to use the
# new EvalConfig.
return (eval_shared_model and not isinstance(eval_shared_model, dict) and
((not eval_shared_model.model_loader.tags or
eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
(not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this run as a JSON string."""
  # Assemble the run metadata proto first, then render it to JSON.
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy format: a single pickled dict stored in a TFRecord file.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): _check_version expects a version *string*, but the whole
    # deserialized dict is passed here; any non-empty dict passes the
    # truthiness check, so legacy files are effectively never
    # version-validated — confirm intent.
    _check_version(final_dict, output_path)
    # Translate the legacy flat config into the new EvalConfig proto shape.
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs have no file_format and a single unnamed ('') model.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
# EvalResult bundles everything produced by (or recorded about) a single
# evaluation run: per-slice metrics/plots plus the config and input/model
# locations used to produce them.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
# Alias so callers can reference tfma.ValidationResult without importing the
# generated proto module directly.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Reads and deserializes the ValidationResult stored in a TFRecord file.

  Returns None when the file holds no records; asserts there is at most one.
  """
  validation_records = [
      ValidationResult.FromString(record)
      for record in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if validation_records:
    assert len(validation_records) == 1
    return validation_records[0]
class EvalResults(object):
  """Holds the results of multiple model analysis runs.

  The mode records whether the runs differ by model (model-centric) or by
  data set (data-centric).
  """

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    valid_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in valid_modes:
      # typing.Text is just str, so the rendered message is unchanged.
      raise ValueError('Mode ' + mode + ' must be one of ' + str(valid_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the underlying list of EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode this collection was built with."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Packs a list of evaluation results into an EvalResults object.

  (Previous docstring said "Run model analysis..." — this function runs
  nothing; it only wraps already-computed results.)

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads results of completed TFMA runs into a single EvalResults.

  (Previous docstring said "Run model analysis..." — this function runs
  nothing; it deserializes results previously written to output_paths.)

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions."""
  # Recover the run metadata written alongside the metrics/plots.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics = metrics_and_plots_serialization.load_and_deserialize_metrics(
      path=os.path.join(output_path, constants.METRICS_KEY),
      model_name=model_name)
  plots = metrics_and_plots_serialization.load_and_deserialize_plots(
      path=os.path.join(output_path, constants.PLOTS_KEY))
  # Without an explicit model name, fall back to the run's first model.
  model_location = (
      list(model_locations.values())[0]
      if model_name is None else model_locations[model_name])
  return EvalResult(
      slicing_metrics=metrics,
      plots=plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. NOTE(review): when the eval tag is
      used, this list is mutated in place below — callers sharing one list
      across calls will accumulate callbacks; confirm whether that is intended.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.
  """
  # Infer tags when the caller did not specify them.
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      # No eval_config: historical default is the EvalSavedModel path.
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one example_weight metric per output name.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
  # pytype: enable=module-attr
  # The model_loader defers actual model construction to evaluation time.
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # An eval_config's slicing_specs override the deprecated slice_spec arg.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the eval signature: legacy (V1) predict extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: the V2 predict extractor handles them.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: predictions are expected in the input itself.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  # Nothing to evaluate if both metrics and plots outputs are disabled.
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Deprecated args are overridden by eval_config options when set.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    # V2 evaluator: metrics come from MetricsSpecs in the eval_config.
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  else:
    add_metric_callbacks = []
  # One sub-directory under output_path per output kind.
  output_paths = {
      output_key: os.path.join(output_path, output_key)
      for output_key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                         constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts.

  Each input element is wrapped in a dict under the INPUT_KEY so downstream
  extractors can add their own keys alongside the raw input.
  """
  return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Each evaluator declares (via run_after) which extractor stage its input
  depends on; its ptransform is attached to the pipeline right after that
  stage runs.
  """
  # evaluation[k] = list of values for k
  evaluation = {}
  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulate each evaluator's outputs per key; merged below.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation
  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Chain extractors; after each stage, run the evaluators scheduled for it.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn that merges the dictionaries emitted by several evaluators.

  The merge requires the per-evaluator dictionaries to have disjoint key
  sets; overlapping keys raise rather than silently overwriting values.
  """

  def create_accumulator(self) -> Dict[Text, Any]:
    # Start from an empty merged dictionary.
    return {}

  def _merge_disjoint(self, target: Dict[Text, Any],
                      source: Dict[Text, Any]) -> None:
    # Shared keys indicate a misconfiguration, so fail loudly.
    overlap = set(target) & set(source)
    if overlap:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlap)
    target.update(source)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge_disjoint(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial in accumulators:
      self._merge_disjoint(merged, partial)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    # The accumulator already is the merged dictionary.
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writers to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  # Each writer consumes the same evaluation/validation dict; its outputs are
  # side effects, so the intermediate results are intentionally discarded.
  for w in writers:
    _ = evaluation_or_validation | w.stage_name >> w.ptransform
  # All PCollections in the dict belong to the same pipeline; use the first
  # value to obtain it for the terminal PDone.
  return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize the run metadata once, up front, then write it as a single
  # unsharded text file next to the other outputs.
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([serialized_run])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:

    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name; '' is used for single-model runs.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments (slice_spec, write_config, etc.).
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  # Fill in defaults for any pipeline component the caller did not supply.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  # Each evaluator must reference an extractor stage that actually runs.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # Main pipeline: wrap inputs, extract + evaluate, then write results.
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  # Unless disabled, record the run's config/metadata alongside the results
  # so load_eval_result can reconstruct the run later.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  # Ensure the output directory exists (temporary dir when none was given).
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments (slice_spec, write_config, etc.).
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  # Run the whole evaluation in a self-contained Beam pipeline; the context
  # manager blocks until the pipeline finishes.
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # Load back the serialized results: a single EvalResult for single-model
  # runs, an EvalResults (one entry per model) otherwise.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # Fix: slice_spec defaults to None, so guard before iterating (previously
  # the default argument value raised TypeError when building the config).
  slicing_specs = None
  if slice_spec:
    slicing_specs = [s.to_proto() for s in slice_spec]
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # One single-model analysis per model, preserving the input order.
  per_model_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # One single-model analysis per data set, preserving the input order.
  per_dataset_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(per_dataset_results, constants.DATA_CENTRIC_MODE)
|
WriteResults
|
Writes Evaluation or Validation results using given writers.
Args:
evaluation_or_validation: Evaluation or Validation output.
    writers: Writers to use for writing out output.
Raises:
ValueError: If Evaluation or Validation is empty.
Returns:
beam.pvalue.PDone.
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises:
    RuntimeError: If the installed TensorFlow is not a 1.15+ or 2.x release.
  """
  # Fail with a clear error in case we are not using a compatible TF version.
  # Only the first two components matter; slicing (rather than unpacking into
  # exactly three names) also tolerates versions with extra dotted components
  # such as '2.1.0.dev20200101'.
  major, minor = tf.version.VERSION.split('.')[:2]
  if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if int(major) == 2:
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used.

  NOTE: due to short-circuit evaluation the return value may be a falsy
  non-bool (e.g. None when eval_shared_model is None); callers should only
  use the result in a boolean context.
  """
  # A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  # has no tags (or uses "eval" as its tag), and does not specify an eval_config
  # (or specifies an eval_config with no metrics). The legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer version of evaluation supports
  # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  # of EvalConfig. The newer version works with both "eval" and serving models
  # and also supports multi-model evaluation. This function is used by code to
  # support backwards compatibility for callers that have not updated to use the
  # new EvalConfig.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this run to a JSON string."""
  # Bundle the config together with run metadata so the run can be fully
  # reconstructed later by _load_eval_run.
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: an EvalRun proto serialized as JSON.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy format: a single pickled dict stored as one TFRecord.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): the whole dict is passed where _check_version expects a
    # version string; any non-empty dict passes the check. Confirm whether a
    # specific key (e.g. final_dict['tfma_version']) was intended.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    # Translate the legacy config object into the proto-based EvalConfig.
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs have no recorded file format ('') and a single unnamed model.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
# ValidationResult is re-exported so callers need not import the proto module.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult.

  Returns None when the file contains no records; otherwise expects exactly
  one record and returns it.
  """
  records = [
      ValidationResult.FromString(raw) for raw in
      tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if not records:
    return None
  assert len(records) == 1
  return records[0]
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializes EvalResults.

    Args:
      results: Evaluation results, one per analysis run.
      mode: One of tfma.DATA_CENTRIC_MODE or tfma.MODEL_CENTRIC_MODE.

    Raises:
      ValueError: If mode is not one of the supported modes.
    """
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      # Use str() rather than calling the typing.Text alias as a constructor.
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       str(supported_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the list of individual EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode (data-centric or model-centric)."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Builds an EvalResults container from a list of evaluation results.

  (The previous summary incorrectly claimed this runs model analysis; it only
  wraps already-computed results.)

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of multiple completed TFMA runs.

  (The previous summary incorrectly claimed this runs model analysis; it only
  loads results previously serialized to the given output paths.)

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Output directory of a completed TFMA run.
    model_name: Optional model name to select metrics/model location for
      (multi-model evaluations only).

  Returns:
    An EvalResult combining the deserialized metrics, plots, and run metadata.
  """
  # Reconstruct the run metadata written by WriteEvalConfig.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # Without a model name, fall back to the first (typically only) recorded
  # model location.
  if model_name is None:
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics_proto_list,
      plots=plots_proto_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel configured with the resolved tags and callbacks.
  """
  # Resolve tags when the caller did not supply them explicitly.
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      # Without an eval_config, assume the legacy EvalSavedModel format.
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    # NOTE(review): when the caller passed a list, these appends mutate it in
    # place — confirm callers do not reuse the same list across calls.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one example-weight metric per output.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
  # pytype: enable=module-attr
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors( # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.
  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.
  Returns:
    List of extractors, ending with a SliceKeyExtractor, suitable for passing
    to ExtractAndEvaluate.
  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # slicing_specs from the EvalConfig override the deprecated slice_spec arg.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    # All models are TFLite: use the TFLite-specific predict extractor.
    if model_types == set([constants.TF_LITE]):
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    # All models use the "eval" signature: use the legacy (V1) extractor.
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    # A mixture of eval and non-eval signatures is unsupported.
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Default path: serving models evaluated with the V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: no predict step, just input + slicing.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators( # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.
  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  Returns:
    List with a single metrics-and-plots evaluator (legacy V1 or V2 depending
    on the model/config), or an empty list if both metrics and plots outputs
    are disabled.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  # Nothing to evaluate if both metrics and plots outputs are disabled.
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Explicit EvalConfig options override the deprecated keyword arguments.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]: # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.
  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  callbacks = []
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    callbacks = eval_shared_model.add_metrics_callbacks
  # Each output kind gets its own sub-directory under output_path.
  output_paths = {
      output_key: os.path.join(output_path, output_key)
      for output_key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                         constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths, add_metrics_callbacks=callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts( # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts.

  Each raw input element is wrapped in a dict keyed by
  constants.INPUT_KEY, which is the Extracts format the downstream
  extractors expect.

  Args:
    inputs: PCollection of serialized inputs (e.g. tf.Examples).

  Returns:
    PCollection of Extracts dicts, one per input element.
  """
  return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate( # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Extractors are applied sequentially to the Extracts PCollection; each
  evaluator runs on the PCollection as it exists after the extractor named
  by its run_after attribute (or before any extraction if run_after is
  unset).

  Args:
    extracts: PCollection of Extracts.
    extractors: Extractors to apply, in order.
    evaluators: Evaluators to run; scheduling is driven by run_after.

  Returns:
    Evaluation dict mapping output keys to PCollections; multi-valued keys
    are merged with _CombineEvaluationDictionariesFn.
  """
  # evaluation[k] = list of values for k
  evaluation = {}
  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation
  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply each extractor in turn, running any evaluator scheduled to run
  # immediately after it.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Evaluators pinned to LAST_EXTRACTOR_STAGE_NAME run on the fully
  # extracted PCollection.
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn merging per-evaluator output dictionaries into a single dict.

  Each evaluator is expected to emit a dict with keys disjoint from every
  other evaluator's output; overlapping keys are an error.
  """

  def create_accumulator(self) -> Dict[Text, Any]:
    # Accumulator is the merged dictionary built up so far.
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Dict-keys-view intersection: any overlap means two evaluators wrote
    # the same key, which would silently lose data on update().
    overlapping = accumulator.keys() & output_dict.keys()
    if overlapping:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlapping)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial in accumulators:
      self._merge(merged, partial)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    # The merged dict is the final output as-is.
    return accumulator
# MASKED: WriteResults function (lines 648-671)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig( # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize once at pipeline-construction time; the single JSON string is
  # then written as one unsharded text file.
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (pipeline
          | 'CreateEvalConfig' >> beam.Create([serialized_run])
          | 'WriteEvalConfig' >> beam.io.WriteToText(
              destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults( # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.
  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.
  Example usage:
  eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
  eval_shared_model = tfma.default_eval_shared_model(
  eval_saved_model_path=model_location, eval_config=eval_config)
  with beam.Pipeline(runner=...) as p:
  _ = (p
  | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
  | 'ExtractEvaluateAndWriteResults' >>
  tfma.ExtractEvaluateAndWriteResults(
  eval_shared_model=eval_shared_model,
  eval_config=eval_config,
  ...))
  result = tfma.load_eval_result(output_path=output_path)
  tfma.view.render_slicing_metrics(result)
  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.
  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.
  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name; the single-model case uses ''.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments (slice_spec, write_config, etc.).
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  # Fill in any components the caller did not supply with the defaults.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  # Each evaluator's run_after must name one of the configured extractors.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  # Optionally record the eval run metadata (config, data/model locations)
  # alongside the results, unless the config file output was disabled.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.
  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.
  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.
  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments, mirroring ExtractEvaluateAndWriteResults.
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # Single-model runs return one EvalResult; multi-model runs return an
  # EvalResults, one entry per model spec.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.
  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.
  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec. If None, the overall
      slice is used.
  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # slice_spec defaults to None; iterating None would raise TypeError, so only
  # build slicing_specs when slice specs were actually provided.
  slicing_specs = None
  if slice_spec:
    slicing_specs = [s.to_proto() for s in slice_spec]
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path) # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # Evaluate each model independently against the shared data set.
  per_model_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # Evaluate the single model once per data set, preserving input order.
  per_dataset_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(per_dataset_results, constants.DATA_CENTRIC_MODE)
|
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults( # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  # Apply each writer's transform; the writers perform the actual output.
  for output_writer in writers:
    _ = (evaluation_or_validation
         | output_writer.stage_name >> output_writer.ptransform)
  # All values share one pipeline, so any entry can supply it for PDone.
  any_value = next(iter(evaluation_or_validation.values()))
  return beam.pvalue.PDone(any_value.pipeline)
| 648
| 671
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises:
    RuntimeError: If the installed TensorFlow version is not >= 1.15, < 3.
  """
  # Fail with a clear error in case we are not using a compatible TF version.
  # Slice rather than 3-way unpack: version strings such as '1.15.0.post1'
  # have more than three dot-separated parts and would otherwise raise a
  # confusing ValueError here instead of the intended RuntimeError below.
  version_parts = tf.version.VERSION.split('.')
  major, minor = int(version_parts[0]), int(version_parts[1])
  if (major not in (1, 2)) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used.

  Args:
    eval_shared_model: Shared model or dict of shared models (a dict is never
      legacy).
    eval_config: Optional eval config.

  Returns:
    Truthy if all legacy conditions below hold (note: the raw short-circuit
    expression value is returned, which callers use in boolean context).
  """
  # A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  # has no tags (or uses "eval" as its tag), and does not specify an eval_config
  # (or specifies an eval_config with no metrics). The legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer version of evaluation supports
  # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  # of EvalConfig. The newer version works with both "eval" and serving models
  # and also supports multi-model evaluation. This function is used by code to
  # support backwards compatibility for callers that have not updated to use the
  # new EvalConfig.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this evaluation as JSON.

  Args:
    eval_config: Eval config for the run.
    data_location: Location of the evaluated data.
    file_format: Format of the evaluated data.
    model_locations: Model paths keyed by model name.

  Returns:
    JSON string for the EvalRun proto.
  """
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy support (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy format: a single pickled dict inside a TFRecord file.
    # NOTE(review): pickle.loads on this file assumes it is trusted output
    # written by TFMA itself.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): the whole dict (not a version string) is passed here; the
    # check only verifies it is non-empty — presumably intentional, confirm.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    # Translate the legacy config object into the modern EvalConfig proto.
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs did not record a file format; model keyed under ''.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
#
# Fields: slicing_metrics/plots are per-slice results; config is the
# EvalConfig used for the run; data_location/file_format/model_location
# record provenance for display purposes.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult.

  Returns None when the file contains no records; the writer emits at most
  one ValidationResult record per file.
  """
  validation_records = [
      ValidationResult.FromString(record)
      for record in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if validation_records:
    assert len(validation_records) == 1
    return validation_records[0]
class EvalResults(object):
  """Holds the results of multiple model analysis runs plus the run mode."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    # Only data-centric and model-centric modes are accepted; the UNKNOWN
    # default is deliberately rejected so callers must choose explicitly.
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      raise ValueError(f'Mode {mode} must be one of {supported_modes}')
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the stored list of EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode of these results."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Bundles a list of evaluation results into a single EvalResults object.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of multiple completed TFMA runs into one EvalResults.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated
      together.

  Returns:
    An EvalResults containing the evaluation results serialized at
    output_paths. This can be used to construct a time series view.
  """
  loaded_results = []
  for output_path in output_paths:
    loaded_results.append(load_eval_result(output_path, model_name=model_name))
  return make_eval_results(loaded_results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions."""
  # Recover the run metadata (config + provenance) written at eval time.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics_list = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots_list = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # Without an explicit model name, fall back to the first recorded location.
  if model_name is None:
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics_list,
      plots=plots_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. The passed list is not modified; a
      copy is made before the automatic callbacks are appended.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel wrapping the model at eval_saved_model_path.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    else:
      # Bug fix: copy before appending so that the automatically-added example
      # count/weight callbacks below do not mutate the caller's list in place.
      add_metrics_callbacks = list(add_metrics_callbacks)
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.
  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.
  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  # When an EvalConfig is given its slicing_specs override the deprecated
  # slice_spec argument.
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    # TFLite models get their own predict extractor; mixing TFLite with other
    # model types is not supported.
    if model_types == set([constants.TF_LITE]):
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    # All-eval signatures use the legacy (V1) predict extractor; mixing eval
    # and non-eval signatures is not supported.
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models use the V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: predictions are expected alongside features.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.
  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  # Nothing to evaluate if both metrics and plots output are disabled.
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    # The deprecated keyword arguments are only honored on the legacy path,
    # and the eval_config options override them when explicitly set.
    if eval_config is not None:
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    # Modern (V2) evaluator driven entirely by the EvalConfig.
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  add_metric_callbacks = []
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  # Each output kind gets its own subdirectory under output_path.
  output_paths = {
      output_key: os.path.join(output_path, output_key)
      for output_key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                         constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""

  def wrap_input(serialized_input):
    # Each raw input becomes an Extracts dict keyed by INPUT_KEY.
    return {constants.INPUT_KEY: serialized_input}

  return inputs | beam.Map(wrap_input)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order."""
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulate each evaluator's outputs per key; a key may end up with
    # multiple values if several evaluators produce it.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply extractors in order; after each one, run any evaluator scheduled
  # to run after that specific extractor stage.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Finally run evaluators scheduled after the last extractor.
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue

    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))

  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Evaluators must produce disjoint keys; an overlap indicates a
    # misconfiguration rather than values that could be merged.
    intersection = set(accumulator) & set(output_dict)
    if intersection:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % intersection)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial in accumulators:
      self._merge(merged, partial)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for results_writer in writers:
    _ = (
        evaluation_or_validation
        | results_writer.stage_name >> results_writer.ptransform)
  # Any value's pipeline will do; they all belong to the same pipeline.
  first_output = next(iter(evaluation_or_validation.values()))
  return beam.pvalue.PDone(first_output.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  # shard_name_template='' writes a single unsharded file at the exact path.
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([serialized_run])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.
  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.
  Example usage:
    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)
  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.
  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.
  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name; a single model uses key ''.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}

  # No EvalConfig provided: synthesize one from the deprecated keyword
  # arguments and the shared model(s) for backwards compatibility.
  if eval_config is None:
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)

  config.verify_eval_config(eval_config)

  # Fill in defaults for any pipeline components not supplied by the caller.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)

  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)

  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)

  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)

  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))

  # Also persist the run metadata (config + provenance) unless disabled.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter

  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.
  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.
  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.
  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()

  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # No EvalConfig provided: synthesize one from the deprecated keyword
  # arguments and the shared model(s), mirroring
  # ExtractEvaluateAndWriteResults.
  if eval_config is None:
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)

  # Run the full extract/evaluate/write pipeline synchronously.
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))

    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter

  # Single-model runs return one EvalResult; multi-model runs return
  # an EvalResults with one entry per model spec.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # Bug fix: the original iterated over slice_spec unconditionally, raising
  # TypeError when the documented default of None was used. Only convert the
  # specs when they were actually provided (overall slice is used otherwise).
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)

  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # Evaluate each model against the shared data set, preserving input order.
  per_model_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # Evaluate the single model against each data set, preserving input order.
  per_data_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(per_data_results, constants.DATA_CENTRIC_MODE)
# NOTE(review): the lines below are extraction residue -- a stray duplicate of
# single_model_analysis's name and docstring, separated by bare "|" characters
# that are not valid Python. Preserved as comments so the file stays
# syntactically valid.
# single_model_analysis
#
# Run model analysis for a single model on a single data set.
# This is a convenience wrapper around run_model_analysis for a single model
# with a single data set. For more complex use cases, use
# tfma.run_model_analysis.
# Args:
#   model_location: Path to the export eval saved model.
#   data_location: The location of the data files.
#   output_path: The directory to output metrics and results to. If None, we
#     use a temporary directory.
#   slice_spec: A list of tfma.slicer.SingleSliceSpec.
# Returns:
#   An EvalResult that can be used with the TFMA visualization functions.
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  # The unpacking mirrors the expected "major.minor.patch" layout.
  major_str, minor_str, _ = tf.version.VERSION.split('.')
  major = int(major_str)
  minor = int(minor_str)
  if major not in (1, 2) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    # TF 2.x works but support is still maturing; warn rather than fail.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used."""
  # A legacy evaluation is an evaluation that uses only a single
  # EvalSharedModel, has no tags (or uses "eval" as its tag), and does not
  # specify an eval_config (or specifies an eval_config with no metrics). The
  # legacy evaluation is based on using add_metrics_callbacks to create a
  # modified version of the graph saved with an EvalSavedModel. The newer
  # version of evaluation supports both add_metrics_callbacks as well as metrics
  # defined in MetricsSpecs inside of EvalConfig. The newer version works with
  # both "eval" and serving models and also supports multi-model evaluation.
  # This function is used by code to support backwards compatibility for
  # callers that have not updated to use the new EvalConfig.
  #
  # Note: the leading truthiness/isinstance checks short-circuit, so the
  # model_loader attribute accesses below only run for a non-dict, non-None
  # eval_shared_model.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this run to a JSON string.

  Args:
    eval_config: The EvalConfig used for the run.
    data_location: Location of the evaluated data.
    file_format: Format of the evaluated data.
    model_locations: Model locations keyed by model name.

  Returns:
    JSON-serialized EvalRun proto.
  """
  run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto stored alongside the
    # results under _EVAL_CONFIG_FILE.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy support (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy runs stored a single pickled dict as the first record of a
    # TFRecord file.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): _check_version expects a version *string*, but the whole
    # deserialized dict is passed here, so any non-empty dict passes the
    # check. Presumably this should be the dict's version entry -- confirm.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    # Translate the legacy EvalConfig fields into the new proto-based config.
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs did not record a file format (empty string) and used a
    # single unnamed model (empty-string key).
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The levels correspond to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
#
# The remaining fields record the run's provenance: the EvalConfig used, the
# evaluated data location/file format and the model location.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])

# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult.

  Args:
    validations_file: Path to a TFRecord file holding at most one serialized
      ValidationResult.

  Returns:
    The deserialized ValidationResult, or None if the file holds no records.
  """
  records = [
      ValidationResult.FromString(raw)
      for raw in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if not records:
    return None
  # A validations file is expected to contain exactly one result.
  assert len(records) == 1
  return records[0]
class EvalResults(object):
  """Class for results from multiple model analysis runs."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializer.

    Args:
      results: One EvalResult per model analysis run.
      mode: Either tfma.DATA_CENTRIC_MODE or tfma.MODEL_CENTRIC_MODE.

    Raises:
      ValueError: If mode is not one of the supported modes.
    """
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      # Idiom fix: the original called typing.Text(...) as a constructor;
      # Text is only an alias for str, so call str() explicitly.
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       str(supported_modes))

    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the list of EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode (data- or model-centric)."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Builds an EvalResults object from a list of evaluation results.

  (The original summary line -- "Run model analysis for a single model on
  multiple data sets." -- was copy-pasted from another function; this function
  only wraps existing results and runs no analysis itself.)

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads results of completed TFMA runs into a single EvalResults object.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  loaded_results = []
  for output_path in output_paths:
    loaded_results.append(load_eval_result(output_path, model_name=model_name))
  return make_eval_results(loaded_results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions."""
  # Recover the run's configuration and data/model provenance.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # Without an explicit model name there is a single recorded model location.
  if model_name is None:
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics,
      plots=plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]

  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    # Bug fix: copy the caller's list before appending so the implicitly added
    # example count/weight callbacks do not mutate the caller's argument.
    add_metrics_callbacks = list(add_metrics_callbacks or [])
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one example-weight metric per output.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr

  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    # An explicit eval_config overrides the deprecated slice_spec argument.
    eval_config = config.update_eval_config_with_defaults(eval_config)
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      # Mixing TFLite and non-TFLite models is not supported.
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models are EvalSavedModels: use the legacy (v1) predict extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # Mixing eval and serving models is not supported.
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: use the v2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: no predictions are computed here.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Both metrics and plots are disabled, so there is nothing to evaluate.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # When set in the eval_config, the options override the deprecated
      # keyword arguments of the same name.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    # New-style evaluation driven entirely by the EvalConfig.
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  else:
    add_metric_callbacks = []

  # One sub-directory per output kind under output_path.
  output_paths = {
      key: os.path.join(output_path, key) for key in (
          constants.METRICS_KEY, constants.PLOTS_KEY,
          constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""

  def to_extracts(serialized):
    # Wrap each raw serialized input under the well-known input key.
    return {constants.INPUT_KEY: serialized}

  return inputs | beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order."""
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulates each evaluator's outputs under their keys; the same key may
    # receive values from multiple evaluators (merged below).
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply each extractor in order, then run any evaluator that requested to
  # run immediately after that extractor's stage.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Evaluators keyed to the sentinel stage run after ALL extractors.
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue

    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))

  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    """Starts from an empty merged dictionary."""
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    """Merges output_dict into accumulator, rejecting duplicate keys."""
    overlapping_keys = set(accumulator) & set(output_dict)
    if overlapping_keys:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlapping_keys)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for accumulator in accumulators:
      self._merge(merged, accumulator)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    # The merged dictionary is the final output as-is.
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  # Apply every writer's ptransform to the evaluation/validation output.
  for result_writer in writers:
    _ = (
        evaluation_or_validation
        | result_writer.stage_name >> result_writer.ptransform)
  # Signal completion using the pipeline of any of the output PCollections.
  first_output = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(first_output.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize the run description once, then emit it as a single unsharded
  # text file under output_path.
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  config_file_path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([serialized_run])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          config_file_path, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.
  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.
  Example usage:
    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)
  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.
  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.
  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name; '' is the single-model key.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  # Legacy path: callers that predate EvalConfig pass everything through the
  # deprecated keyword args, so synthesize an equivalent EvalConfig here.
  if eval_config is None:
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  # Each evaluator must be able to find the extractor stage it runs after.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  # Record the run's config/metadata alongside the results unless the caller
  # disabled the eval config output.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.
  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.
  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.
  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  # Make sure an output directory exists; fall back to a temp dir so the
  # quick-start path needs no arguments beyond model and data.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # Legacy path: synthesize an EvalConfig from the deprecated keyword args
  # when the caller did not supply one.
  if eval_config is None:
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  # The pipeline runs (and blocks) when the `with` block exits.
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # Single-model runs return one EvalResult; multi-model runs return an
  # EvalResults with one entry per model spec.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
# MASKED: single_model_analysis function (lines 1001-1036)
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Evaluates several models against the same data set.
  Args:
    model_locations: Paths to the exported eval saved models.
    data_location: The location of the data files.
    **kwargs: Evaluation args forwarded to tfma.single_model_analysis(); see
      that function for details.
  Returns:
    A tfma.EvalResults holding one evaluation result per entry in
    model_locations, in the same order.
  """
  per_model_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Evaluates one model against several data sets.
  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: Evaluation args forwarded to tfma.run_model_analysis(); see that
      function for details.
  Returns:
    A tfma.EvalResults holding one evaluation result per entry in
    data_locations, in the same order.
  """
  per_data_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(per_data_results, constants.DATA_CENTRIC_MODE)
|
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.
  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.
  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: Optional list of tfma.slicer.SingleSliceSpec. If None, only the
      overall slice is computed.
  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # slice_spec defaults to None; iterating it unconditionally would raise
  # TypeError, so only build slicing_specs when slice specs were provided.
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
| 1,001
| 1,036
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
# Name of the JSON file, written into each output directory, that records the
# EvalRun (the EvalConfig plus run metadata such as data/model locations).
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Raises RuntimeError unless a supported TensorFlow version is installed."""
  version_string = tf.version.VERSION
  # Unpacking intentionally requires a three-part "major.minor.patch" string.
  major, minor, _ = version_string.split('.')
  major_num = int(major)
  # Supported: any 2.x release, or 1.x with x >= 15.
  is_supported = major_num == 2 or (major_num == 1 and int(minor) >= 15)
  if not is_supported:
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % version_string)
  if major_num == 2:
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % version_string)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]) -> bool:
  """Returns True if the legacy add_metrics_callbacks evaluation is in use.
  A legacy evaluation uses a single EvalSharedModel saved with no tags (or the
  "eval" tag) and either no eval_config or one that defines no metrics_specs;
  its metrics come from add_metrics_callbacks applied to the EvalSavedModel
  graph. The newer path additionally supports serving models, metrics defined
  via MetricsSpecs, and multi-model evaluation. This helper exists for
  backwards compatibility with callers that have not adopted EvalConfig.
  Args:
    eval_shared_model: Shared model, dict of shared models (multi-model), or
      None.
    eval_config: Eval config, if any.
  Returns:
    True if the legacy single-model evaluation path should be used. (The
    previous implementation could return non-bool falsy values such as None
    from short-circuiting `and`; this version always returns a bool, which is
    truthiness-compatible for all callers.)
  """
  # Multi-model (dict) or absent model is never a legacy evaluation.
  if not eval_shared_model or isinstance(eval_shared_model, dict):
    return False
  tags = eval_shared_model.model_loader.tags
  # Legacy models are saved with no tags at all or with the "eval" tag.
  if tags and eval_constants.EVAL_TAG not in tags:
    return False
  # Any metrics defined via MetricsSpecs select the new evaluation path.
  return not eval_config or not eval_config.metrics_specs
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this run to a JSON string."""
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy format: a single pickled dict stored in a TFRecord file.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): this passes the whole deserialized dict rather than a
    # version entry from it, so the check only fails for an empty dict —
    # looks like it should pass the stored version value; confirm intent.
    _check_version(final_dict, output_path)
    # Rebuild a modern EvalConfig from the legacy config object's fields.
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The levels correspond to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Reads the single serialized ValidationResult, or None if none was written.
  Args:
    validations_file: Path to a TFRecord file holding at most one serialized
      ValidationResult.
  Returns:
    The deserialized ValidationResult, or None when the file holds no records.
  """
  records = [
      ValidationResult.FromString(record)
      for record in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if records:
    # The writer emits exactly one result per file.
    assert len(records) == 1
    return records[0]
class EvalResults(object):
  """Holds the results of several model analysis runs plus their shared mode."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    # Only the two multi-run modes are accepted; the UNKNOWN default exists
    # solely so callers must pick one explicitly.
    valid_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in valid_modes:
      raise ValueError('Mode ' + mode + ' must be one of ' + str(valid_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the stored list of EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode (data- or model-centric)."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Packs a list of evaluation results into an EvalResults container.
  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads previously serialized evaluation results from multiple output paths.
  Note this performs no evaluation itself; it only deserializes the results of
  completed runs.
  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.
  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Loads a serialized evaluation run into an EvalResult for visualization."""
  # Recover the run metadata (config, data/model locations) first.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # Without an explicit model name, fall back to the first recorded model.
  model_location = (
      list(model_locations.values())[0]
      if model_name is None else model_locations[model_name])
  return EvalResult(
      slicing_metrics=metrics,
      plots=plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.
  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. Note: when the "eval" tag is used,
      this list is extended in place with the automatic callbacks.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.
  Returns:
    A types.EvalSharedModel configured with a ModelLoader for the given path,
    tags, and callbacks.
  """
  # Infer tags when not given: from the eval_config's signature names if
  # available, otherwise assume a legacy EvalSavedModel.
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    # NOTE(review): when the caller passed a non-empty list, the appends below
    # mutate that caller-provided list in place.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one weight callback per output, tagged by name.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.
  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.
  Returns:
    A list of Extractors appropriate for the given model type(s): the legacy
    predict path, the TFLite path, the v2 predict path, or (with no model) an
    input-only model-agnostic path.
  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  # When an eval_config is given it supersedes the deprecated slice_spec arg.
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    # All-TFLite models get the TFLite predict path; mixing TFLite with other
    # model types is unsupported.
    if model_types == set([constants.TF_LITE]):
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    # All-eval signatures use the legacy predict path; mixing eval and
    # non-eval signatures is unsupported.
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: no prediction step, inputs only.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    A single-element list containing either the legacy (V1) or new (V2)
    MetricsAndPlotsEvaluator, or an empty list when both the metrics and
    plots outputs are disabled in the config.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        # Disable default metrics on every shared model (dict or single).
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  # Nothing to evaluate if both metrics and plots outputs are disabled.
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Values set explicitly in the config override the deprecated args.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metrics_callbacks are used by the metrics/plots serialization code
  # to post process metric data via populate_stats_and_pop. Only the legacy
  # MetricsAndPlotsEvaluator output needs this; the V2 evaluator output does
  # not. Since the legacy path supports a single EvalSharedModel only, the
  # callbacks are taken solely from a non-dict eval_shared_model.
  callbacks = []
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    callbacks = eval_shared_model.add_metrics_callbacks

  # Each output kind gets its own subdirectory under output_path.
  output_keys = (constants.METRICS_KEY, constants.PLOTS_KEY,
                 constants.VALIDATIONS_KEY)
  output_paths = {key: os.path.join(output_path, key) for key in output_keys}
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""

  def to_extracts(serialized_input):
    # Wrap each raw input as the single INPUT_KEY entry of a new extract.
    return {constants.INPUT_KEY: serialized_input}

  return inputs | beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Each evaluator declares (via run_after) which extractor stage it depends on;
  it is applied to the PCollection immediately after that stage runs. The
  outputs of all evaluators are merged into a single Evaluation dict.

  Args:
    extracts: PCollection of Extracts (as produced by InputsToExtracts).
    extractors: Extractors applied in order to the extracts.
    evaluators: Evaluators producing the Evaluation outputs.

  Returns:
    Dict keyed by evaluation output name; multi-valued keys are merged via
    _CombineEvaluationDictionariesFn.
  """
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulates each evaluator's outputs per key (a key may get multiple
    # values when several evaluators write to the same output name).
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply each extractor in order; immediately afterwards run any evaluator
  # that declared it must run after that extractor's stage.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Finally run evaluators that wait for the last extractor stage.
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue

    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))

  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    # The accumulator is a plain dict; evaluator outputs are merged into it.
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Evaluators writing to the same output must produce disjoint dicts.
    overlap = set(accumulator) & set(output_dict)
    if overlap:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlap)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial in accumulators:
      self._merge(merged, partial)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  # Apply every writer's transform; the outputs themselves are discarded.
  for results_writer in writers:
    _ = (
        evaluation_or_validation
        | results_writer.stage_name >> results_writer.ptransform)
  # All values share the same pipeline; any of them can supply it for PDone.
  first_value = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(first_value.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize the eval run metadata once up front; the pipeline merely
  # writes the resulting JSON string as a single unsharded file.
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (pipeline
          | 'CreateEvalConfig' >> beam.Create([serialized_run])
          | 'WriteEvalConfig' >> beam.io.WriteToText(
              destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:

    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to the multi-model form: a dict keyed by model name, with ''
  # used as the key for the single-model case.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}

  if eval_config is None:
    # No config given: synthesize one from the deprecated per-arg settings.
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      # Disabling the config output suppresses the WriteEvalConfig step below.
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)

  config.verify_eval_config(eval_config)

  # Fill in defaults for any stage the caller did not customize.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)

  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)

  # Each evaluator's run_after must reference an extractor that is present.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)

  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)

  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))

  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    # Record the eval run metadata (display-only values substitute for
    # information that is not recoverable from a raw PCollection).
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter

  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()

  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  if eval_config is None:
    # No config given: synthesize one from the deprecated per-arg settings.
    # NOTE(review): this duplicates the config-building logic inside
    # ExtractEvaluateAndWriteResults; keep the two in sync.
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)

  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))

    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter

  # Single-model runs return one EvalResult; multi-model runs return an
  # EvalResults with one entry per model spec (loaded back from disk).
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # Bug fix: slice_spec defaults to None but was previously iterated
  # unconditionally, raising a TypeError whenever the default was used.
  # Mirror the guard used elsewhere in this module.
  slicing_specs = None
  if slice_spec:
    slicing_specs = [s.to_proto() for s in slice_spec]
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)

  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # Evaluate each model independently against the shared data set.
  results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # Evaluate the single model once per data set.
  results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(results, constants.DATA_CENTRIC_MODE)
|
multiple_model_analysis
|
Run model analysis for multiple models on the same data set.
Args:
model_locations: A list of paths to the export eval saved model.
data_location: The location of the data files.
**kwargs: The args used for evaluation. See tfma.single_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as model_locations.
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises:
    RuntimeError: If the installed TensorFlow is not 1.15+ or 2.x.
  """
  # Fail with a clear error in case we are not using a compatible TF version.
  # Bug fix: only the first two version components matter, so slice rather
  # than 3-way unpack -- version strings with more than three dot-separated
  # components (e.g. dev/nightly builds) previously raised a ValueError here
  # instead of producing the intended error message.
  major, minor = tf.version.VERSION.split('.')[:2]
  if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if int(major) == 2:
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used.

  Note: the value is produced by an `and`/`or` chain, so callers should only
  rely on its truthiness (it may be e.g. None rather than False).
  """
  # A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  # has no tags (or uses "eval" as its tag), and does not specify an eval_config
  # (or specifies an eval_config with no metrics). The legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer version of evaluation supports
  # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  # of EvalConfig. The newer version works with both "eval" and serving models
  # and also supports multi-model evaluation. This function is used by code to
  # support backwards compatibility for callers that have not updated to use the
  # new EvalConfig.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this evaluation as JSON."""
  # Bundle the run metadata (config, TFMA version, data/model locations)
  # into an EvalRun proto before converting to its JSON representation.
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy format: a single pickled dict stored in a TFRecord file.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): the whole dict (not a version string) is passed here, so
    # this only verifies the dict is non-empty -- confirm this is intended.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    # Translate the legacy config fields into a modern EvalConfig.
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
#
# The remaining fields record the eval run's provenance: the EvalConfig used,
# where the data was read from, its file format, and the model's path.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult


def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult."""
  # Deserialize every record in the file; a valid output has exactly one.
  records = [
      ValidationResult.FromString(record)
      for record in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if records:
    assert len(records) == 1
    return records[0]
  # Implicitly returns None when the file holds no records.
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    # Only the data-centric and model-centric modes are accepted.
    valid_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in valid_modes:
      raise ValueError('Mode ' + mode + ' must be one of ' + Text(valid_modes))

    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the individual EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode (data-centric or model-centric)."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Wraps a list of evaluation results in a single EvalResults container.

  (The previous docstring summary was copied from another function; this
  function performs no analysis itself.)

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of completed TFMA runs from multiple output paths.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated
      together.

  Returns:
    An EvalResults containing the evaluation results serialized at
    output_paths. This can be used to construct a time series view.
  """
  loaded = []
  for path in output_paths:
    loaded.append(load_eval_result(path, model_name=model_name))
  return make_eval_results(loaded, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions."""
  # Recover the run configuration first; it also records where the model(s)
  # used for this run were located.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics = metrics_and_plots_serialization.load_and_deserialize_metrics(
      path=os.path.join(output_path, constants.METRICS_KEY),
      model_name=model_name)
  plots = metrics_and_plots_serialization.load_and_deserialize_plots(
      path=os.path.join(output_path, constants.PLOTS_KEY))
  if model_name is None:
    # Single-model run: pick the only entry recorded for the run.
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics,
      plots=plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel whose ModelLoader captures the resolved tags and
    a construct_fn built by model_util.model_construct_fn.
  """
  # Resolve tags when the caller did not provide them: prefer the signature
  # names declared in eval_config, otherwise fall back to the EVAL tag.
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      # A dict example_weight_key means a multi-output model: tag each weight
      # metric with its output name. A plain string covers single-output.
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # eval_config takes precedence over the deprecated slice_spec argument.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # Every model uses the eval signature: legacy (V1) predict extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving signatures only: use the V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: predictions are expected to already be
    # present in the input extracts, so no predict extractor is added.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Both outputs disabled: nothing for an evaluator to produce.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # eval_config settings override the deprecated keyword arguments.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    callbacks = eval_shared_model.add_metrics_callbacks
  else:
    callbacks = []
  output_keys = (constants.METRICS_KEY, constants.PLOTS_KEY,
                 constants.VALIDATIONS_KEY)
  output_paths = {key: os.path.join(output_path, key) for key in output_keys}
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""

  def to_extracts(serialized):
    # Each raw input becomes an Extracts dict keyed by INPUT_KEY.
    return {constants.INPUT_KEY: serialized}

  return inputs | beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Each evaluator declares (via run_after) which extractor stage it must run
  after; evaluators are applied as soon as their stage has run. Outputs that
  multiple evaluators write to are merged at the end.
  """
  # evaluation[k] = list of values for k
  evaluation = {}
  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulate each evaluator's output PCollections per output key.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation
  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply extractors in order, running each evaluator immediately after the
  # stage it declared via run_after.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Evaluators sharing an output key must produce disjoint inner keys;
    # any overlap indicates two evaluators emitted the same metric.
    intersection = set(accumulator) & set(output_dict)
    if intersection:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % intersection)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial in accumulators:
      self._merge(merged, partial)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for output_writer in writers:
    _ = (evaluation_or_validation
         | output_writer.stage_name >> output_writer.ptransform)
  # Every value shares the same pipeline, so any of them yields the PDone.
  first_value = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(first_value.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (pipeline
          | 'CreateEvalConfig' >> beam.Create([serialized_run])
          # shard_name_template='' produces a single unsharded output file.
          | 'WriteEvalConfig' >> beam.io.WriteToText(
              destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:
    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict of models keyed by name; a single model gets the
  # empty-string name.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  if eval_config is None:
    # No config supplied: synthesize one from the deprecated keyword args.
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  # Fill in any pipeline components the caller did not supply.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  # Persist the run configuration (for load_eval_result) unless disabled.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  if eval_config is None:
    # No config supplied: synthesize one from the deprecated keyword args
    # (mirrors the same fallback in ExtractEvaluateAndWriteResults).
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # The pipeline has run to completion here (exiting the with-block waits for
  # it); load the serialized results back from output_path.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # Fix: slice_spec defaults to None, so guard before iterating it.
  # Previously `[s.to_proto() for s in slice_spec]` raised TypeError whenever
  # the default (None) was used.
  slicing_specs = [s.to_proto() for s in slice_spec] if slice_spec else None
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)

  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as data_locations.
  """
  per_dataset_results = [
      single_model_analysis(model_location, data_location, **kwargs)
      for data_location in data_locations
  ]
  return EvalResults(per_dataset_results, constants.DATA_CENTRIC_MODE)
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis()
      for details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as model_locations.
  """
  per_model_results = [
      single_model_analysis(model_location, data_location, **kwargs)
      for model_location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  major_str, minor_str, _ = tf.version.VERSION.split('.')
  major = int(major_str)
  minor = int(minor_str)
  if major not in (1, 2) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    # TF 2.x works but support is still considered beta; warn rather than fail.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used.

  Note: the result is the value of the `and`-chain below, so callers should
  treat it as truthy/falsy rather than strictly boolean (a falsy
  `eval_shared_model` is returned as-is).
  """
  # A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  # has no tags (or uses "eval" as its tag), and does not specify an eval_config
  # (or specifies an eval_config with no metrics). The legacy evaluation is
  # based on using add_metrics_callbacks to create a modified version of the
  # graph saved with an EvalSavedModel. The newer version of evaluation supports
  # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  # of EvalConfig. The newer version works with both "eval" and serving models
  # and also supports multi-model evaluation. This function is used by code to
  # support backwards compatibility for callers that have not updated to use the
  # new EvalConfig.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          ((not eval_shared_model.model_loader.tags or
            eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
           (not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text,
                        model_locations: Dict[Text, Text]) -> Text:
  """Serializes an EvalRun proto describing this evaluation run as JSON."""
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations.

  Reads the EvalRun JSON file written alongside the metrics/plots output. If
  the JSON file is absent, falls back to deserializing a legacy pickled record
  (older TFMA versions) and reconstructs an EvalConfig from its fields.
  """
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # Legacy runs stored a single pickled record in a TFRecord file.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): the whole dict is passed where _check_version expects a
    # version string; any non-empty dict passes the check — confirm intended.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs had a single unnamed model and no recorded file format.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult.

  Args:
    validations_file: Path to a TFRecord file holding the validation output.

  Returns:
    The single ValidationResult stored in the file, or None if the file
    contains no records.
  """
  validation_records = [
      ValidationResult.FromString(record)
      for record in tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if validation_records:
    # A validation run writes at most one result record.
    assert len(validation_records) == 1
    return validation_records[0]
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializes EvalResults.

    Args:
      results: A list of TFMA evaluation results.
      mode: The evaluation mode (data-centric or model-centric).

    Raises:
      ValueError: If mode is not one of the supported modes.
    """
    valid_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in valid_modes:
      raise ValueError('Mode ' + mode + ' must be one of ' + Text(valid_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the individual EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the evaluation mode for these results."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Bundles multiple evaluation results into a single EvalResults object.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of multiple completed TFMA runs into one EvalResults.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Directory containing the serialized output of a TFMA run.
    model_name: Optional model name; if None the first recorded model location
      is used, otherwise the location keyed by this name.

  Returns:
    An EvalResult with deserialized metrics, plots, and run metadata.
  """
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # Pick the model location for display purposes.
  if model_name is None:
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics_proto_list,
      plots=plots_proto_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. The passed list is not mutated.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel wrapping the model at eval_saved_model_path.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]

  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    else:
      # Copy so the appends below do not mutate the caller's list (calling this
      # function twice with the same list would otherwise accumulate duplicate
      # example count/weight callbacks).
      add_metrics_callbacks = list(add_metrics_callbacks)
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr

  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Returns:
    A list of extractors appropriate for the model type and config.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # The config's slicing specs take precedence over the deprecated arg.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: predictions come from the TFLite interpreter.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the "eval" signature: use the legacy (V1) extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: use the V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: no prediction step.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    A list with a single metrics/plots evaluator (V1 or V2), or an empty list
    if both metrics and plots outputs are disabled.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Nothing to compute if both outputs are disabled.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Deprecated args are overridden by the config when set there.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  add_metric_callbacks = []
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks

  # Each output kind is written to its own subdirectory of output_path.
  output_paths = {
      output_key: os.path.join(output_path, output_key)
      for output_key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                         constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""
  # Each raw input becomes an Extracts dict with the serialized bytes stored
  # under INPUT_KEY for downstream extractors to consume.
  return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Each evaluator declares (via run_after) which extractor stage it runs after;
  evaluators with no run_after see only the raw input extracts, and those with
  run_after == LAST_EXTRACTOR_STAGE_NAME run after all extractors.
  """
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulates each evaluator's outputs per key (a key may be produced by
    # more than one evaluator).
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue

    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators.

  Raises ValueError if two evaluators produce the same key, since the combined
  output is the disjoint union of the per-evaluator dictionaries.
  """

  def create_accumulator(self) -> Dict[Text, Any]:
    # The accumulator is the merged dictionary built up so far.
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Merge in place, rejecting any key collision between evaluator outputs.
    intersection = set(accumulator) & set(output_dict)
    if intersection:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % intersection)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    result = self.create_accumulator()
    for acc in accumulators:
      self._merge(result, acc)
    return result

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for results_writer in writers:
    _ = (evaluation_or_validation
         | results_writer.stage_name >> results_writer.ptransform)
  # All output PCollections share the same pipeline; grab it from any of them.
  first_output = next(iter(evaluation_or_validation.values()))
  return beam.pvalue.PDone(first_output.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes EvalConfig to file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  serialized_run = _serialize_eval_run(eval_config, data_location, file_format,
                                       model_locations)
  config_file_path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (
      pipeline
      | 'CreateEvalConfig' >> beam.Create([serialized_run])
      | 'WriteEvalConfig' >> beam.io.WriteToText(
          config_file_path, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:
    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict keyed by model name ('' for the single-model case).
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}

  if eval_config is None:
    # Build an EvalConfig from the deprecated individual arguments.
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)

  config.verify_eval_config(eval_config)

  # Fill in defaults for any pipeline stages the caller did not supply.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)

  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)

  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)

  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)

  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))

  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    # Record run metadata (for display only) alongside the results.
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter

  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no
      writers are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions (or
    an EvalResults when eval_config names multiple models).

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()

  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  if eval_config is None:
    # Backwards-compatibility path: synthesize an EvalConfig from the
    # deprecated keyword args (slice_spec, write_config, etc.).
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      # Normalize the single-model case to a dict keyed by '' (default name).
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys
      # (multi-output model); the proto stores those in a separate field.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)

  # The pipeline runs (and blocks) inside this context manager; results are
  # read back from output_path afterwards.
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))

    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter

  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    # Multi-model evaluation: load one EvalResult per configured model.
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec. If None, only the
      overall slice is computed.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)

  # Bug fix: slice_spec defaults to None, and iterating None raised TypeError.
  # Treat an absent slice_spec as "no slicing" (overall slice only).
  slicing_specs = [s.to_proto() for s in slice_spec] if slice_spec else None
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_locations: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis()
      for details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as model_locations.
  """
  # Evaluate each model independently against the shared data set.
  per_model_results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as data_locations.
  """
  # Evaluate the single model against each data set independently.
  per_dataset_results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return EvalResults(per_dataset_results, constants.DATA_CENTRIC_MODE)
|
multiple_data_analysis
|
Run model analysis for a single model on multiple data sets.
Args:
model_location: The location of the exported eval saved model.
data_locations: A list of data set locations.
**kwargs: The args used for evaluation. See tfma.run_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as data_locations.
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version."""
  # Fail with a clear error in case we are not using a compatible TF version.
  tf_version = tf.version.VERSION
  major_str, minor_str, _ = tf_version.split('.')
  major = int(major_str)
  minor = int(minor_str)
  unsupported = major not in (1, 2) or (major == 1 and minor < 15)
  if unsupported:
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf_version)
  if major == 2:
    # TF 2.x works but support is still in beta; warn rather than fail.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf_version)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used."""
  # A legacy evaluation uses exactly one EvalSharedModel (not a dict), whose
  # loader either has no tags or carries the "eval" tag, and either has no
  # eval_config or one without metrics_specs. Legacy evaluation relies on
  # add_metrics_callbacks to modify the graph saved with an EvalSavedModel,
  # whereas the newer path also supports metrics declared in MetricsSpecs,
  # serving models, and multi-model evaluation. This predicate exists for
  # backwards compatibility with callers not yet using EvalConfig.
  if not eval_shared_model:
    # Preserve the original truthiness-chain result (e.g. None stays None).
    return eval_shared_model
  if isinstance(eval_shared_model, dict):
    return False
  tags = eval_shared_model.model_loader.tags
  return ((not tags or eval_constants.EVAL_TAG in tags) and
          (not eval_config or not eval_config.metrics_specs))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto describing this evaluation run as JSON."""
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: a JSON-serialized EvalRun proto (see _serialize_eval_run).
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    # SECURITY NOTE(review): pickle.loads executes arbitrary code from the
    # stored record — only load eval output directories you trust.
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): the whole dict (not a version string) is passed here;
    # _check_version only tests truthiness, so any non-empty dict passes.
    # Presumably intended to pass the stored version value — TODO confirm.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    # Map the legacy EvalConfig fields onto the new proto-based Options.
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    # Legacy runs recorded no file format ('') and a single unnamed model.
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])

# Define types here to avoid type errors between OSS and internal code.
# Alias for the proto so callers can reference tfma.ValidationResult directly.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult."""
  # A validations file is expected to hold exactly one serialized record.
  records = [
      ValidationResult.FromString(serialized)
      for serialized in tf.compat.v1.python_io.tf_record_iterator(
          validations_file)
  ]
  if not records:
    return None
  assert len(records) == 1
  return records[0]
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    # Only the data-centric and model-centric comparison modes are accepted.
    valid_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in valid_modes:
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       Text(valid_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    # Individual run results, in the order they were supplied.
    return self._results

  def get_mode(self) -> Text:
    # The comparison mode this collection was built for.
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Constructs an EvalResults collection from a list of evaluation results.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of multiple completed TFMA runs into one collection.

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated
      together.

  Returns:
    An EvalResults containing the evaluation results serialized at
    output_paths. This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Directory containing the serialized output of a TFMA run
      (metrics, plots, and the eval config / eval run record).
    model_name: Optional model name; required to select the model location
      when the run evaluated multiple models.

  Returns:
    An EvalResult assembled from the deserialized run artifacts.
  """
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  metrics_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots_proto_list = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  if model_name is None:
    # Single-model runs store one location; take it regardless of key.
    # NOTE(review): with multiple recorded models this picks an arbitrary
    # one — presumably callers pass model_name in that case; verify.
    model_location = list(model_locations.values())[0]
  else:
    model_location = model_locations[model_name]
  return EvalResult(
      slicing_metrics=metrics_proto_list,
      plots=plots_proto_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to
      excessive memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.

  Returns:
    A types.EvalSharedModel configured for the given model path and options.
  """
  if tags is None:
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]

  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    # Bug fix: copy the caller's list so appending the automatic callbacks
    # below never mutates the argument the caller passed in.
    add_metrics_callbacks = list(add_metrics_callbacks or [])
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr

  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.

  Returns:
    A list of extractors appropriate for the model type / config.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval
      models, or mixed tf_lite and non-tf_lite models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # An explicit eval_config overrides the deprecated slice_spec argument.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All-TFLite configs get the TFLite-specific predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All models use the eval signature: legacy (V1) predict extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: V2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: no predict extractor at all.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    A list of evaluators (possibly empty if metrics and plots outputs are
    both disabled in eval_config).
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Nothing to compute: both outputs are disabled.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Explicit eval_config options override the deprecated kwargs.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  else:
    add_metric_callbacks = []

  # Each output kind is written to its own subdirectory of output_path.
  output_paths = {
      key: os.path.join(output_path, key) for key in (
          constants.METRICS_KEY, constants.PLOTS_KEY,
          constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts."""

  def to_extracts(serialized_input):
    # Each raw input is wrapped in a dict under the well-known input key.
    return {constants.INPUT_KEY: serialized_input}

  return inputs | beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order."""
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulate each evaluator's output under its keys (multi-valued).
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply extractors in order; after each one, run any evaluator scheduled
  # to run immediately after that extractor's stage.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)

  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue

    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))

  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators."""

  def create_accumulator(self) -> Dict[Text, Any]:
    # Accumulator is the union-in-progress of all evaluator output dicts.
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Evaluator outputs must not collide: enforce disjoint key sets.
    overlapping = set(accumulator) & set(output_dict)
    if overlapping:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlapping)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for accumulator in accumulators:
      self._merge(merged, accumulator)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    # The accumulated disjoint union is the final combined dictionary.
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes out Evaluation or Validation results via the supplied writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writes to use for writing out output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  # Fan the results out to every writer; each writer is its own Beam stage.
  for results_writer in writers:
    _ = (evaluation_or_validation
         | results_writer.stage_name >> results_writer.ptransform)
  # Any entry works for recovering the pipeline; use the first one.
  first_output = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(first_output.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Serializes the EvalConfig (plus run metadata) and writes it to a file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  # Serialize eagerly; only the write happens inside the pipeline.
  eval_run_json = _serialize_eval_run(eval_config, data_location, file_format,
                                      model_locations)
  config_file = os.path.join(output_path, _EVAL_CONFIG_FILE)
  return (pipeline
          | 'CreateEvalConfig' >> beam.Create([eval_run_json])
          | 'WriteEvalConfig' >> beam.io.WriteToText(
              config_file, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.
  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.
  Example usage:
  eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
  eval_shared_model = tfma.default_eval_shared_model(
      eval_saved_model_path=model_location, eval_config=eval_config)
  with beam.Pipeline(runner=...) as p:
    _ = (p
         | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
         | 'ExtractEvaluateAndWriteResults' >>
         tfma.ExtractEvaluateAndWriteResults(
             eval_shared_model=eval_shared_model,
             eval_config=eval_config,
             ...))
  result = tfma.load_eval_result(output_path=output_path)
  tfma.view.render_slicing_metrics(result)
  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.
  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.
  Returns:
    PDone.
  """
  # Normalize the single-model case to a dict keyed by the empty model name so
  # single- and multi-model evaluation can be handled uniformly below.
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments (slice_spec, write_config, etc).
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  # Fill in defaults for any components the caller did not supply.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  # Every evaluator must have a matching extractor (raises ValueError if not).
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # Main pipeline: extract -> evaluate -> write.
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  # Optionally record the run's EvalConfig plus display-only metadata next to
  # the results so load_eval_result can reconstruct the run later.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.
  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.
  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.
  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.
  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  if eval_config is None:
    # Backwards compatibility: synthesize an EvalConfig from the deprecated
    # keyword arguments (slice_spec, write_config, etc).
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # The pipeline has run to completion at this point (exiting the context
  # manager waits for it), so the results can be read back from output_path.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    # Multi-model evaluation: load one result per model spec.
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec. If None or empty, the
      overall slice is used.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # slice_spec is optional; guard against iterating over None (previously this
  # raised TypeError whenever slice_spec was omitted despite its None default).
  slicing_specs = [s.to_proto() for s in slice_spec] if slice_spec else None
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as model_locations.
  """
  # Evaluate each model in turn against the shared data set.
  results = [
      single_model_analysis(model_location, data_location, **kwargs)
      for model_location in model_locations
  ]
  return EvalResults(results, constants.MODEL_CENTRIC_MODE)
# MASKED: multiple_data_analysis function (lines 1059-1076)
|
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same order
    as data_locations.
  """
  # Evaluate the shared model once per data set, preserving input order.
  results = [
      single_model_analysis(model_location, data_location, **kwargs)
      for data_location in data_locations
  ]
  return EvalResults(results, constants.DATA_CENTRIC_MODE)
| 1,059
| 1,076
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises:
    RuntimeError: If the installed TensorFlow is not a 1.15+ 1.x or a 2.x
      release.
  """
  # Fail with a clear error in case we are not using a compatible TF version.
  # Only the first two components matter; versions such as '2.4.0.dev20200901'
  # have more than three dot-separated parts and would make a three-way unpack
  # of split('.') raise ValueError before we could produce a useful message.
  version_parts = tf.version.VERSION.split('.')
  major = int(version_parts[0])
  minor = int(version_parts[1])
  if (major not in (1, 2)) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
                    eval_config: Optional[config.EvalConfig]):
  """Returns True if legacy evaluation is being used.

  A legacy evaluation is an evalution that uses only a single EvalSharedModel,
  has no tags (or uses "eval" as its tag), and does not specify an eval_config
  (or specifies an eval_config with no metrics). The legacy evaluation is
  based on using add_metrics_callbacks to create a modified version of the
  graph saved with an EvalSavedModel. The newer version of evaluation supports
  both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
  of EvalConfig. The newer version works with both "eval" and serving models
  and also supports multi-model evaluation. This function is used by code to
  support backwards compatibility for callers that have not updated to use the
  new EvalConfig.
  """
  def _uses_eval_tag():
    # No tags at all, or the "eval" tag, means an EvalSavedModel.
    tags = eval_shared_model.model_loader.tags
    return not tags or eval_constants.EVAL_TAG in tags

  def _has_no_config_metrics():
    return not eval_config or not eval_config.metrics_specs

  # Short-circuiting mirrors the original expression: the model attributes are
  # only touched when a single (non-dict) shared model is present.
  return (eval_shared_model and not isinstance(eval_shared_model, dict) and
          _uses_eval_tag() and _has_no_config_metrics())
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
                        file_format: Text, model_locations: Dict[Text,
                                                                 Text]) -> Text:
  """Serializes an EvalRun proto (config plus run metadata) to JSON."""
  eval_run = config_pb2.EvalRun(
      eval_config=eval_config,
      version=tfma_version.VERSION_STRING,
      data_location=data_location,
      file_format=file_format,
      model_locations=model_locations)
  return json_format.MessageToJson(eval_run)
def _load_eval_run(
    output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
  """Returns eval config, data location, file format, and model locations."""
  path = os.path.join(output_path, _EVAL_CONFIG_FILE)
  if tf.io.gfile.exists(path):
    # Current format: an EvalRun proto serialized as JSON.
    with tf.io.gfile.GFile(path, 'r') as f:
      pb = json_format.Parse(f.read(), config_pb2.EvalRun())
      _check_version(pb.version, output_path)
      return (pb.eval_config, pb.data_location, pb.file_format,
              pb.model_locations)
  else:
    # Legacy suppport (to be removed in future).
    # The previous version did not include file extension.
    path = os.path.splitext(path)[0]
    serialized_record = six.next(
        tf.compat.v1.python_io.tf_record_iterator(path))
    # SECURITY: pickle.loads on file contents -- only load eval outputs from
    # trusted locations; a crafted record can execute arbitrary code.
    final_dict = pickle.loads(serialized_record)
    # NOTE(review): this passes the whole dict rather than a version string;
    # _check_version only tests truthiness, so any non-empty dict passes --
    # confirm that is intended.
    _check_version(final_dict, output_path)
    old_config = final_dict['eval_config']
    slicing_specs = None
    if old_config.slice_spec:
      slicing_specs = [s.to_proto() for s in old_config.slice_spec]
    # Rebuild a modern EvalConfig from the legacy config's fields.
    options = config.Options()
    options.compute_confidence_intervals.value = (
        old_config.compute_confidence_intervals)
    options.k_anonymization_count.value = old_config.k_anonymization_count
    return (config.EvalConfig(slicing_specs=slicing_specs,
                              options=options), old_config.data_location, '', {
                                  '': old_config.model_location
                              })
# The field slicing_metrics is a nested dictionaries representing metrics for
# different configuration as defined by MetricKey in metrics_for_slice.proto.
# The levels corresponds to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
# Result of a single model-analysis run as returned by load_eval_result().
EvalResult = NamedTuple(  # pylint: disable=invalid-name
    'EvalResult',
    [('slicing_metrics',
      List[Tuple[slicer.SliceKeyType,
                 Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
                                                                  Any]]]]]]]),
     ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
     ('config', config.EvalConfig), ('data_location', Text),
     ('file_format', Text), ('model_location', Text)])
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
    validations_file: Text) -> Optional[ValidationResult]:
  """Read and deserialize the ValidationResult.

  Args:
    validations_file: Path to a TFRecord file holding the serialized result.

  Returns:
    The single ValidationResult stored in the file, or None if it is empty.
  """
  records = [
      ValidationResult.FromString(serialized) for serialized in
      tf.compat.v1.python_io.tf_record_iterator(validations_file)
  ]
  if not records:
    return None
  # A validations file is expected to hold exactly one record.
  assert len(records) == 1
  return records[0]
class EvalResults(object):
  """Class for results from multiple model analysis run."""

  def __init__(self,
               results: List[EvalResult],
               mode: Text = constants.UNKNOWN_EVAL_MODE):
    """Initializes EvalResults.

    Args:
      results: Results of the individual runs.
      mode: One of the supported comparison modes (tfma.DATA_CENTRIC_MODE or
        tfma.MODEL_CENTRIC_MODE).

    Raises:
      ValueError: If mode is not one of the supported modes.
    """
    supported_modes = [
        constants.DATA_CENTRIC_MODE,
        constants.MODEL_CENTRIC_MODE,
    ]
    if mode not in supported_modes:
      # str(), not typing.Text(): Text is a type alias for annotations, not a
      # constructor (it only worked before because Text aliases str).
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       str(supported_modes))
    self._results = results
    self._mode = mode

  def get_results(self) -> List[EvalResult]:
    """Returns the list of individual EvalResult objects."""
    return self._results

  def get_mode(self) -> Text:
    """Returns the mode the results were produced under."""
    return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
  """Wraps a list of evaluation results into a single EvalResults object.

  This does not run any analysis itself; it only packages already-computed
  results (the previous docstring incorrectly described it as running model
  analysis).

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    An EvalResults containing all evaluation results. This can be used to
    construct a time series view.
  """
  return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
                      mode: Text,
                      model_name: Optional[Text] = None) -> EvalResults:
  """Loads the results of multiple completed TFMA runs into one EvalResults.

  This loads previously-serialized results from disk; it does not run any
  analysis itself (the previous docstring incorrectly described it as running
  model analysis).

  Args:
    output_paths: A list of output paths of completed tfma runs.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: The name of the model if multiple models are evaluated together.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  results = [
      load_eval_result(output_path, model_name=model_name)
      for output_path in output_paths
  ]
  return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Directory a completed TFMA run wrote its results to.
    model_name: The name of the model if multiple models were evaluated.

  Returns:
    The deserialized EvalResult.
  """
  # Recover the run metadata written alongside the results.
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  slicing_metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # Without an explicit model name, fall back to the first recorded location.
  model_location = (
      list(model_locations.values())[0]
      if model_name is None else model_locations[model_name])
  return EvalResult(
      slicing_metrics=slicing_metrics,
      plots=plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.
  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    eval_config: Eval config. Only used for setting default tags.
  """
  if tags is None:
    # Infer the tags from the eval config's signature names when possible.
    if eval_config:
      # Default to serving unless all the signature_names are eval. We do not
      # support running with a mixture of eval and non-eval tags.
      signatures = [s.signature_name for s in eval_config.model_specs]
      if eval_constants.EVAL_TAG in signatures:
        if not all(s == eval_constants.EVAL_TAG for s in signatures):
          tf.compat.v1.logging.warning(
              'mixture of eval and non-eval signatures used: '
              'eval_config={}'.format(eval_config))
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    else:
      tags = [eval_constants.EVAL_TAG]
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if tags == [eval_constants.EVAL_TAG]:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    # NOTE(review): when the caller passes a non-empty list, the appends below
    # mutate the caller's list in place -- confirm this is intended.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one example-weight metric per output.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr
  return types.EvalSharedModel(
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              tags=tags)))
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Union[types.EvalSharedModel,
                             Dict[Text, types.EvalSharedModel]] = None,
    eval_config: config.EvalConfig = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    desired_batch_size: Optional[int] = None,
    materialize: Optional[bool] = True) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.
  Args:
    eval_shared_model: Shared model (single-model evaluation) or dict of shared
      models keyed by model name (multi-model evaluation). Required unless the
      predictions are provided alongside of the features (i.e. model-agnostic
      evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    materialize: True to have extractors create materialized output.
  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if eval_config is not None:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    # The eval config's slicing specs take precedence over the deprecated
    # slice_spec argument.
    slice_spec = [
        slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
    ]
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    return [
        predict_extractor.PredictExtractor(
            eval_shared_model, desired_batch_size, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = model_util.get_model_types(eval_config)
    if not model_types.issubset(constants.VALID_MODEL_TYPES):
      raise NotImplementedError(
          'model type must be one of: {}. evalconfig={}'.format(
              str(constants.VALID_MODEL_TYPES), eval_config))
    if model_types == set([constants.TF_LITE]):
      # All models are TFLite: use the TFLite predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          tflite_predict_extractor.TFLitePredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      # Mixing TFLite with other model types is unsupported.
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # All signatures are "eval": use the EvalSavedModel predict extractor.
      return [
          predict_extractor.PredictExtractor(
              eval_shared_model,
              desired_batch_size,
              materialize=materialize,
              eval_config=eval_config),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
    elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
                              for s in eval_config.model_specs)):
      # Mixing "eval" and serving signatures is unsupported.
      raise NotImplementedError(
          'support for mixing eval and non-eval models is not implemented: '
          'eval_config={}'.format(eval_config))
    else:
      # Serving models: use the v2 predict extractor.
      return [
          input_extractor.InputExtractor(eval_config=eval_config),
          predict_extractor_v2.PredictExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size),
          slice_key_extractor.SliceKeyExtractor(
              slice_spec, materialize=materialize)
      ]
  else:
    # Model-agnostic evaluation: predictions are expected in the input.
    return [
        input_extractor.InputExtractor(eval_config=eval_config),
        slice_key_extractor.SliceKeyExtractor(
            slice_spec, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if there are metrics to be computed in-graph using the model.
    eval_config: Eval config.
    compute_confidence_intervals: Deprecated (use eval_config).
    k_anonymization_count: Deprecated (use eval_config).
    desired_batch_size: Optional batch size for batching in combiner.
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    A list with a single MetricsAndPlotsEvaluator (the legacy V1 or the V2
    implementation depending on the model/config), or an empty list if both
    the metrics and plots outputs are disabled.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = config.update_eval_config_with_defaults(eval_config)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
      # no in-graph metrics present when tflite is used.
      if eval_shared_model:
        # Turn off default metrics on the shared model(s); metrics are
        # computed by the V2 evaluator outside the graph instead.
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs):
    # Nothing to evaluate when both metrics and plots are disabled.
    return []
  if _is_legacy_eval(eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Values set in the config override the deprecated keyword arguments.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('k_anonymization_count'):
        k_anonymization_count = eval_config.options.k_anonymization_count.value
    return [
        metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            k_anonymization_count=k_anonymization_count,
            desired_batch_size=desired_batch_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if legacy add_metrics_callbacks are used.
  """
  # add_metrics_callbacks are only needed by the legacy (V1) metrics/plots
  # serialization path, which post-processes metric data via
  # populate_stats_and_pop. The V2 evaluator output needs no such processing,
  # and since V1 only supports a single EvalSavedModel, the callbacks are
  # skipped whenever a dict of models is passed.
  if eval_shared_model and not isinstance(eval_shared_model, dict):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  else:
    add_metric_callbacks = []
  output_paths = {
      key: os.path.join(output_path, key)
      for key in (constants.METRICS_KEY, constants.PLOTS_KEY,
                  constants.VALIDATIONS_KEY)
  }
  return [
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          add_metrics_callbacks=add_metric_callbacks),
  ]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection):
  """Converts serialized inputs (e.g. examples) to Extracts.

  Each raw input becomes a single-entry extracts dict keyed by
  constants.INPUT_KEY.
  """
  def to_extracts(serialized_input):
    return {constants.INPUT_KEY: serialized_input}

  return inputs | beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]):
  """Performs Extractions and Evaluations in provided order.

  Each evaluator runs on the extracts produced up to the extractor stage named
  by its `run_after` attribute (empty = before any extraction,
  LAST_EXTRACTOR_STAGE_NAME = after all extractors).
  """
  # evaluation[k] = list of values for k
  evaluation = {}

  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulate each evaluator's per-key outputs into lists so that outputs
    # from multiple evaluators under the same key can be merged below.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation

  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Apply each extractor in order; after each stage, run any evaluator that
  # declared it should run after that specific stage.
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn to combine dictionaries generated by different evaluators.

  Each evaluator is expected to contribute a dict with keys disjoint from
  every other evaluator; the combined result is the union of those dicts.
  """

  def create_accumulator(self) -> Dict[Text, Any]:
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Overlapping keys indicate a misconfiguration: two evaluators wrote to
    # the same output key.
    overlapping_keys = set(accumulator) & set(output_dict)
    if overlapping_keys:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % overlapping_keys)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
    merged = self.create_accumulator()
    for partial_accumulator in accumulators:
      self._merge(merged, partial_accumulator)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
                                       validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]):
  """Writes Evaluation or Validation results using given writers.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writers to apply to the output.

  Raises:
    ValueError: If Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for output_writer in writers:
    _ = (evaluation_or_validation
         | output_writer.stage_name >> output_writer.ptransform)
  # Every entry belongs to the same pipeline, so any value can supply it.
  first_output = list(evaluation_or_validation.values())[0]
  return beam.pvalue.PDone(first_output.pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    eval_config: config.EvalConfig,
    output_path: Text,
    data_location: Optional[Text] = '',
    file_format: Optional[Text] = '',
    model_locations: Optional[Dict[Text, Text]] = None):
  """Writes the serialized EvalConfig plus run metadata to a file.

  Args:
    pipeline: Beam pipeline.
    eval_config: EvalConfig.
    output_path: Output path.
    data_location: Optional location for data used with config.
    file_format: Optional format for data used with config.
    model_locations: Optional location(s) for model(s) used with config.

  Returns:
    beam.pvalue.PDone.
  """
  serialized_eval_run = _serialize_eval_run(eval_config, data_location,
                                            file_format, model_locations)
  destination = os.path.join(output_path, _EVAL_CONFIG_FILE)
  # shard_name_template='' forces a single unsharded output file.
  return (pipeline
          | 'CreateEvalConfig' >> beam.Create([serialized_eval_run])
          | 'WriteEvalConfig' >> beam.io.WriteToText(
              destination, shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:
    eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
    eval_shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=model_location, eval_config=eval_config)
    with beam.Pipeline(runner=...) as p:
      _ = (p
           | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
           | 'ExtractEvaluateAndWriteResults' >>
           tfma.ExtractEvaluateAndWriteResults(
               eval_shared_model=eval_shared_model,
               eval_config=eval_config,
               ...))
    result = tfma.load_eval_result(output_path=output_path)
    tfma.view.render_slicing_metrics(result)

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples. Can be any format the model
      accepts (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers and for
      display purposes of the model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no
      writers are provided, default_writers will be used.
    output_path: Path to output metrics and plots results.
    display_only_data_location: Optional path indicating where the examples
      were read from. This is used only for display purposes - data will not
      actually be read from this path.
    display_only_file_format: Optional format of the examples. This is used
      only for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  # Normalize to a dict of shared models keyed by model name ('' is used as
  # the key for the single-model case).
  eval_shared_models = eval_shared_model
  if not isinstance(eval_shared_model, dict):
    eval_shared_models = {'': eval_shared_model}
  if eval_config is None:
    # Build an EvalConfig from the deprecated keyword arguments for backwards
    # compatibility.
    model_specs = []
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  else:
    eval_config = config.update_eval_config_with_defaults(eval_config)
  config.verify_eval_config(eval_config)
  # Fill in defaults for any pipeline components that were not provided.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        materialize=False,
        desired_batch_size=desired_batch_size)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing)
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path, eval_shared_model=eval_shared_model)
  # Main extraction -> evaluation -> writing sub-pipeline.
  # pylint: disable=no-value-for-parameter
  _ = (
      examples
      | 'InputsToExtracts' >> InputsToExtracts()
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  # Additionally record the run's config for later loading/display, unless
  # explicitly disabled.
  if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
    data_location = '<user provided PCollection>'
    if display_only_data_location is not None:
      data_location = display_only_data_location
    file_format = '<unknown>'
    if display_only_file_format is not None:
      file_format = display_only_file_format
    model_locations = {}
    for k, v in eval_shared_models.items():
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    _ = (
        examples.pipeline
        | WriteEvalConfig(eval_config, output_path, data_location, file_format,
                          model_locations))
  # pylint: enable=no-value-for-parameter
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[Union[types.EvalSharedModel,
                                      Dict[Text,
                                           types.EvalSharedModel]]] = None,
    eval_config: config.EvalConfig = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    k_anonymization_count: int = 1,
    desired_batch_size: Optional[int] = None,
    random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.
  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or dict
      of shared models keyed by model name (multi-model evaluation). Only
      required if needed by default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no
      writers are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    k_anonymization_count: Deprecated (use EvalConfig).
    desired_batch_size: Optional batch size for batching in Predict.
    random_seed_for_testing: Provide for deterministic tests only.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  if eval_config is None:
    # Build an EvalConfig from the deprecated keyword arguments for backwards
    # compatibility.
    model_specs = []
    eval_shared_models = eval_shared_model
    if not isinstance(eval_shared_model, dict):
      # Normalize to a dict keyed by model name ('' for the single model).
      eval_shared_models = {'': eval_shared_model}
    for model_name, shared_model in eval_shared_models.items():
      example_weight_key = shared_model.example_weight_key
      example_weight_keys = {}
      # A dict-valued example_weight_key means per-output weight keys.
      if example_weight_key and isinstance(example_weight_key, dict):
        example_weight_keys = example_weight_key
        example_weight_key = ''
      model_specs.append(
          config.ModelSpec(
              name=model_name,
              example_weight_key=example_weight_key,
              example_weight_keys=example_weight_keys))
    slicing_specs = None
    if slice_spec:
      slicing_specs = [s.to_proto() for s in slice_spec]
    options = config.Options()
    options.compute_confidence_intervals.value = compute_confidence_intervals
    options.k_anonymization_count.value = k_anonymization_count
    if not write_config:
      options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
    eval_config = config.EvalConfig(
        model_specs=model_specs, slicing_specs=slicing_specs, options=options)
  # Run the evaluation pipeline; results are written under output_path and
  # loaded back below once the pipeline completes.
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
          file_pattern=data_location,
          compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            desired_batch_size=desired_batch_size,
            random_seed_for_testing=random_seed_for_testing))
    # pylint: enable=no-value-for-parameter
  # Single-model runs return an EvalResult; multi-model runs return
  # EvalResults with one entry per model spec.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Text = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    slice_spec: A list of tfma.slicer.SingleSliceSpec. If None, only the
      overall slice is computed.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Get working_dir ready.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # Bug fix: slice_spec defaults to None, so it must be guarded before
  # iterating (the previous code raised TypeError for the default value).
  slicing_specs = [s.to_proto() for s in slice_spec] if slice_spec else None
  eval_config = config.EvalConfig(slicing_specs=slicing_specs)
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis()
      for details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as model_locations.
  """
  per_model_results = [
      single_model_analysis(model_location, data_location, **kwargs)
      for model_location in model_locations
  ]
  return EvalResults(per_model_results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as data_locations.
  """
  per_dataset_results = [
      single_model_analysis(model_location, data_location, **kwargs)
      for data_location in data_locations
  ]
  return EvalResults(per_dataset_results, constants.DATA_CENTRIC_MODE)
|
cross_channel_threshold_detector
|
Parameters
----------
multichannel : np.array
Msamples x Nchannels audio data
fs : float >0
detector_function : function, optional
The function used to detect the start and end of a signal.
Any custom detector function can be given, the compulsory inputs
are audio np.array, sample rate and the function should accept keyword
arguments (even if it doesn't use them.)
Defaults to dBrms_detector.
Returns
-------
all_detections : list
A list with sublists containing start-stop times of the detections
in each channel. Each sublist contains the detections in one channel.
Notes
-----
For further keyword arguments see the `threshold_detector` function
See Also
--------
dBrms_detector
|
'''
Deals with the actual detection of signals in multichannel audio files.
There are two problems that need to be solved while detecting a signal of interest.
#. within-channel signal detection
#. across-channel correspondence matching
Within-channel signal detection
-------------------------------
This task involves `locally` checking if there are any signals of interest in one channel at a time. The exact methods used for
the within-channel can be set by the user, though the simplest is of course a basic threshold-type detector. Whenever the
signal goes beyond a particular threshold, a signal is considered to be in that region.
Built-in detection routines
---------------------------
The detection module has a few simple detection routines. More advanced routines
are unlikely to form a core part of the package, and need to be written by the
user.
#. dBrms_detector : Calculates the moving dB rms profile of an audio clip. The
User needs to define the size of the moving window and the threshold in dB rms.
#. envelope_detector : Generates the Hilbert envelop of the audio clip. Regions above
the set threshold in dB peak amplitude are defined as detections. This method is faster
than the dBrms_detector.
'''
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize']=10000
import numpy as np
import scipy.signal as signal
import scipy.io.wavfile as wav
import scipy.ndimage as ndimage
import tqdm
from batracker.common_dsp.sigproc import *
# MASKED: cross_channel_threshold_detector function (lines 39-75)
def dBrms_detector(one_channel, fs, **kwargs):
    '''
    Calculates the moving dB rms profile of the input audio and
    returns the regions which are above the threshold.
    Parameters
    ----------
    one_channel : np.array
        Flattened (1D) audio data.
    fs : float>0
        Sampling rate in Hz.
    dbrms_threshold: float, optional
        Threshold in dB rms above which a region counts as a detection.
        Defaults to -50 dB rms
    dbrms_window: float, optional
        The window which is used to calculate the dB rms profile
        in seconds. Defaults to 0.001 seconds.
    Returns
    -------
    detections : list with tuples
        Each tuple corresponds to a candidate signal region, given as
        (start, stop) times in seconds.
    '''
    # The moving-rms profile below assumes a 1D time series.
    if one_channel.ndim > 1:
        raise IndexError(f'Input audio must be flattened, and have only 1 dimension. \
                         Current audio has {one_channel.ndim} dimensions')
    dbrms_window = kwargs.get('dbrms_window',0.001) # seconds
    dbrms_threshold = kwargs.get('dbrms_threshold', -50)
    window_samples = int(fs*dbrms_window)
    # dB of the moving rms profile; dB and moving_rms come from
    # batracker.common_dsp.sigproc / this module.
    dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples))
    # Contiguous runs of samples above threshold form one labelled region
    # each. NOTE(review): samples exactly at the threshold are excluded
    # (strict >).
    labelled, num_regions = ndimage.label(dBrms_profile>dbrms_threshold)
    if num_regions==0:
        print (f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!')
    regions_above = ndimage.find_objects(labelled.flatten())
    # Convert each region's sample slice into (start, stop) seconds.
    regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above]
    return regions_above_timestamps
def envelope_detector(audio, fs, **kwargs):
    '''
    Detects loud signals from the Hilbert envelope of the audio.
    A detection is any region where the envelope stays at or above a
    user-defined threshold. The threshold can be specified either
    relative to the envelope's floor level, or as an absolute dB peak
    value.
    Parameters
    ----------
    audio : np.array
    fs : float>0
    Keyword Arguments
    -----------------
    threshold_db_floor: float, optional
        The threshold for signal detection in dB above the floor level.
        The 5%ile level of the whole envelope is chosen as the floor
        level. If not specified, then threshold_dbpeak is used to
        segment signals.
    threshold_dbpeak : float, optional
        The absolute dB peak value beyond which a signal is considered
        to start. Used only when threshold_db_floor is not given.
    lowpass_durn: float, optional
        The highest time-resolution of envelope fluctuation to keep.
        This effectively performs a low-pass at 1/lowpass_durn Hz on the
        raw envelope signal.
    Returns
    -------
    regions_above_timestamps : list with tuples
        (start, stop) times in seconds of each detected region.
    '''
    amplitude_envelope = np.abs(signal.hilbert(audio))
    # Optionally smooth the envelope with a first-order Butterworth low-pass.
    lowpass_durn = kwargs.get('lowpass_durn')
    if lowpass_durn is not None:
        cutoff_frequency = 1.0/lowpass_durn
        b, a = signal.butter(1, cutoff_frequency/(fs*0.5), 'lowpass')
        amplitude_envelope = signal.filtfilt(b, a, amplitude_envelope)
    if kwargs.get('threshold_db_floor') is not None:
        # Threshold relative to the 5%ile 'floor' of the dB envelope.
        floor_level = np.percentile(20*np.log10(amplitude_envelope), 5)
        threshold_db = floor_level + kwargs['threshold_db_floor']
    else:
        # Absolute threshold in dB peak.
        threshold_db = kwargs['threshold_dbpeak']
    linear_threshold = 10**(threshold_db/20)
    labelled, _ = ndimage.label(amplitude_envelope >= linear_threshold)
    above_threshold_regions = ndimage.find_objects(labelled.flatten())
    return [get_start_stop_times(region, fs) for region in above_threshold_regions]
def get_start_stop_times(findobjects_tuple, fs):
    '''
    Converts one `scipy.ndimage.find_objects` entry into start/stop times.
    Parameters
    ----------
    findobjects_tuple : tuple
        A tuple holding a single slice object, as returned for a 1D
        array by scipy.ndimage.find_objects.
    fs : float>0
        Sampling rate in Hz.
    Returns
    -------
    start, stop : float
        Start and stop times of the region in seconds.
    '''
    region_slice = findobjects_tuple[0]
    start, stop = region_slice.start/fs, region_slice.stop/fs
    return start, stop
def moving_rms(X, **kwargs):
    '''Calculates moving rms of a signal with given window size.
    Outputs np.array of *same* length as X (as an Nx1 column vector).
    The last few samples <= window_size away from the end are assigned
    the last full-window rms calculated.
    Parameters
    ----------
    X : np.array
        Signal of interest.
    window_size : int, optional
        Defaults to 125 samples.
    Returns
    -------
    all_rms : np.array
        Nx1 array with the moving rms of the signal. If X is too short
        to fit even one full window, all entries are np.nan.
    '''
    window_size = kwargs.get('window_size', 125)
    starts = np.arange(0, X.size)
    stops = starts+window_size
    valid = stops<X.size
    valid_starts = np.int32(starts[valid])
    valid_stops = np.int32(stops[valid])
    # Bug fix: initialise with NaN instead of the previous 999 sentinel,
    # which could collide with a genuine rms value of 999.
    all_rms = np.full((X.size, 1), np.nan)
    last_rms = np.nan
    for i, (start, stop) in enumerate(zip(valid_starts, valid_stops)):
        last_rms = rms(X[start:stop])
        all_rms[i] = last_rms
    # Bug fix: the trailing samples without a full window are assigned the
    # last full-window rms (as documented), instead of being set to NaN.
    all_rms[np.isnan(all_rms)] = last_rms
    return all_rms
#
#if __name__ == '__main__':
# import scipy.signal as signal
# # trying out the hilbert envelope method:
# fs = 250000
# background = -60 # dB rms
# audio = np.random.normal(0, 10**(background/20), fs)
# duration = 0.005
# sound_start = 0.05
# t = np.linspace(0, duration, int(fs*duration))
# bat_call = signal.chirp(t,90000, 25000, t[-1])
# bat_call *= 0.5
# sound_stop = sound_start+duration
#
# start, end = np.int32(np.array([sound_start,
# sound_stop])*fs)
# audio[start:end] += bat_call
#
# envelope = np.abs(signal.hilbert(audio))
#
# dets = envelope_detector(audio, fs, threshold_dbpeak=-20)
# print(dets)
##
|
def cross_channel_threshold_detector(multichannel, fs, **kwargs):
    '''
    Runs a within-channel signal detector separately on each channel of
    a multichannel recording.
    Parameters
    ----------
    multichannel : np.array
        Msamples x Nchannels audio data
    fs : float >0
        Sampling rate in Hz.
    detector_function : function, optional
        The function used to detect the start and end of a signal.
        Any custom detector function can be given, the compulsory inputs
        are audio np.array, sample rate and the function should accept keyword
        arguments (even if it doesn't use them.)
        Defaults to dBrms_detector.
    Returns
    -------
    all_detections : list
        A list with sublists containing start-stop times of the detections
        in each channel. Each sublist contains the detections in one channel.
    Notes
    -----
    For further keyword arguments see the `threshold_detector` function
    See Also
    --------
    dBrms_detector
    '''
    # The unpacking also validates that the input is a 2D
    # Msamples x Nchannels array.
    num_samples, num_channels = multichannel.shape
    detector_function = kwargs.get('detector_function', dBrms_detector)
    # Bug fix: removed leftover debug print of channel/sample counts.
    all_detections = []
    # tqdm gives per-channel progress feedback for long recordings.
    for channel_index in tqdm.tqdm(range(num_channels)):
        one_channel = multichannel[:, channel_index]
        all_detections.append(detector_function(one_channel, fs, **kwargs))
    return all_detections
| 39
| 75
|
'''
Deals with the actual detection of signals in multichannel audio files.
There are two problems that need to be solved while detecting a signal of interest.
#. within-channel signal detection
#. across-channel correspondence matching
Within-channel signal detection
-------------------------------
This task involves `locally` checking if there are any signals of interest in one channel at a time. The exact methods used for
the within-channel can be set by the user, though the simplest is of course a basic threshold-type detector. Whenever the
signal goes beyond a particular threshold, a signal is considered to be in that region.
Built-in detection routines
---------------------------
The detection module has a few simple detection routines. More advanced routines
are unlikely to form a core part of the package, and need to be written by the
user.
#. dBrms_detector : Calculates the moving dB rms profile of an audio clip. The
User needs to define the size of the moving window and the threshold in dB rms.
#. envelope_detector : Generates the Hilbert envelop of the audio clip. Regions above
the set threshold in dB peak amplitude are defined as detections. This method is faster
than the dBrms_detector.
'''
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize']=10000
import numpy as np
import scipy.signal as signal
import scipy.io.wavfile as wav
import scipy.ndimage as ndimage
import tqdm
from batracker.common_dsp.sigproc import *
def cross_channel_threshold_detector(multichannel, fs, **kwargs):
    '''
    Runs a single-channel detector function over every channel of a
    multichannel recording and collects the per-channel detections.

    Parameters
    ----------
    multichannel : np.array
        Msamples x Nchannels audio data
    fs : float >0
        Sampling rate in Hz.
    detector_function : function, optional
        The function used to detect the start and end of a signal.
        Any custom detector function can be given; the compulsory inputs
        are audio np.array, sample rate, and the function should accept
        keyword arguments (even if it doesn't use them).
        Defaults to dBrms_detector.

    Returns
    -------
    all_detections : list
        A list with sublists containing start-stop times of the detections
        in each channel. Each sublist contains the detections in one channel.

    Notes
    -----
    For further keyword arguments see the `dBrms_detector` function.

    See Also
    --------
    dBrms_detector
    '''
    _, channels = multichannel.shape
    detector_function = kwargs.get('detector_function', dBrms_detector)
    # Run the chosen detector independently on each channel column.
    # (A leftover debug print of the array shape was removed here.)
    all_detections = []
    for channel_index in tqdm.tqdm(range(channels)):
        all_detections.append(
            detector_function(multichannel[:, channel_index], fs, **kwargs)
        )
    return all_detections
def dBrms_detector(one_channel, fs, **kwargs):
    '''
    Calculates the moving dB rms profile of the input audio and
    selects regions which are above the threshold.

    Parameters
    ----------
    one_channel : np.array
        Single-channel audio; must be 1-dimensional.
    fs : float>0
        Sampling rate in Hz.
    dbrms_threshold: float, optional
        Threshold in dB rms above which a region counts as a detection.
        Defaults to -50 dB rms
    dbrms_window: float, optional
        The window which is used to calculate the dB rms profile
        in seconds. Defaults to 0.001 seconds.

    Returns
    -------
    detections : list with tuples
        Each tuple corresponds to a candidate signal region,
        as (start_time, stop_time) in seconds.

    Raises
    ------
    IndexError
        If the input audio has more than one dimension.
    '''
    if one_channel.ndim > 1:
        raise IndexError(f'Input audio must be flattened, and have only 1 dimension. \
                         Current audio has {one_channel.ndim} dimensions')
    dbrms_window = kwargs.get('dbrms_window',0.001) # seconds
    dbrms_threshold = kwargs.get('dbrms_threshold', -50)
    window_samples = int(fs*dbrms_window)
    # moving_rms returns a column vector; dB converts it to decibels
    dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples))
    # label contiguous runs of above-threshold samples as candidate regions
    labelled, num_regions = ndimage.label(dBrms_profile>dbrms_threshold)
    if num_regions==0:
        print (f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!')
    regions_above = ndimage.find_objects(labelled.flatten())
    # convert each slice of sample indices into (start, stop) seconds
    regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above]
    return regions_above_timestamps
def envelope_detector(audio, fs, **kwargs):
    '''
    Detects signals as regions where the Hilbert envelope of the audio
    exceeds a threshold. The threshold can be given either relative to the
    envelope's floor level (dB above its 5th percentile) or as an absolute
    dB peak value.

    Parameters
    ----------
    audio : np.array
    fs : float>0

    Keyword Arguments
    -----------------
    threshold_db_floor: float, optional
        The threshold for signal detection in dB above the floor level.
        The 5%ile level of the whole envelope is chosen as the floor level.
        If not specified, then threshold_dbpeak is used to segment signals.
    threshold_dbpeak : float, optional
        The value beyond which a signal is considered to start.
    lowpass_durn: float, optional
        The highest time-resolution of envelope fluctuation to keep.
        This effectively performs a low-pass at 1/lowpass_durn Hz on the
        raw envelope signal.

    Returns
    -------
    regions_above_timestamps : list with tuples
        (start, stop) times in seconds of each detected region.
    '''
    envelope = np.abs(signal.hilbert(audio))

    smoothing_durn = kwargs.get('lowpass_durn')
    if smoothing_durn is not None:
        # Smooth the envelope with a 1st-order low-pass at 1/lowpass_durn Hz.
        cutoff_freq = 1.0 / smoothing_durn
        b, a = signal.butter(1, cutoff_freq / (fs * 0.5), 'lowpass')
        envelope = signal.filtfilt(b, a, envelope)

    if kwargs.get('threshold_db_floor', None) is not None:
        # Floor level = 5th percentile of the envelope in dB.
        floor_level = np.percentile(20 * np.log10(envelope), 5)
        threshold_db = floor_level + kwargs['threshold_db_floor']
    else:
        # get regions above the absolute dB peak threshold
        threshold_db = kwargs['threshold_dbpeak']

    linear_threshold = 10 ** (threshold_db / 20)
    labelled, num_detections = ndimage.label(envelope >= linear_threshold)
    regions_above = ndimage.find_objects(labelled.flatten())
    return [get_start_stop_times(region, fs) for region in regions_above]
def get_start_stop_times(findobjects_tuple, fs):
    '''
    Converts one `scipy.ndimage.find_objects` entry (a tuple holding a
    single slice of sample indices) into start/stop times in seconds.

    Parameters
    ----------
    findobjects_tuple : tuple
        One-element tuple containing a slice object.
    fs : float>0
        Sampling rate in Hz.

    Returns
    -------
    start, stop : float
        Start and stop of the region in seconds.
    '''
    region_slice = findobjects_tuple[0]
    return region_slice.start / fs, region_slice.stop / fs
def moving_rms(X, **kwargs):
    '''Calculates moving rms of a signal with given window size.
    Outputs np.array of *same* size as X. The rms of the
    last few samples <= window_size away from the end are assigned
    to the last full-window rms calculated.

    Parameters
    ----------
    X : np.array
        Signal of interest. Expected to be 1-dimensional.
    window_size : int, optional
        Defaults to 125 samples.

    Returns
    -------
    all_rms : np.array
        Moving rms of the signal, shape (X.size, 1).

    Notes
    -----
    If X is shorter than window_size, no full window fits and the
    output is all NaN.
    '''
    window_size = kwargs.get('window_size', 125)
    # Number of start positions that admit a complete window. The previous
    # implementation used `stops < X.size`, an off-by-one that dropped the
    # final full window, and then filled the unassigned tail with NaN —
    # contradicting its own docstring and comment. Both are fixed here.
    num_full_windows = max(X.size - window_size + 1, 0)
    all_rms = np.full((X.size, 1), np.nan)
    last_rms = np.nan
    for start in range(num_full_windows):
        segment = X[start:start + window_size]
        # rms computed inline as sqrt of the mean square
        # (matches the sigproc `rms` helper — TODO confirm)
        last_rms = np.sqrt(np.mean(segment ** 2.0))
        all_rms[start] = last_rms
    # Assign the tail (no full window fits) the last full-window rms value.
    all_rms[num_full_windows:] = last_rms
    return all_rms
#
#if __name__ == '__main__':
# import scipy.signal as signal
# # trying out the hilbert envelope method:
# fs = 250000
# background = -60 # dB rms
# audio = np.random.normal(0, 10**(background/20), fs)
# duration = 0.005
# sound_start = 0.05
# t = np.linspace(0, duration, int(fs*duration))
# bat_call = signal.chirp(t,90000, 25000, t[-1])
# bat_call *= 0.5
# sound_stop = sound_start+duration
#
# start, end = np.int32(np.array([sound_start,
# sound_stop])*fs)
# audio[start:end] += bat_call
#
# envelope = np.abs(signal.hilbert(audio))
#
# dets = envelope_detector(audio, fs, threshold_dbpeak=-20)
# print(dets)
##
|
input_fn
|
Creates tensorflow dataset using the generator_fn
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
:param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'
:param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)
:return: instance of tf.data.Dataset
|
# reimplementation of https://github.com/guillaumegenthial/tf_ner/blob/master/models/lstm_crf/main.py
import functools
import json
import logging
from pathlib import Path
import sys
import numpy as np
import tensorflow as tf
# tf.enable_eager_execution()
from tf_metrics import precision, recall, f1
DATADIR = "../../../data/conll/"
# Setup Logging
Path('results').mkdir(exist_ok=True)
tf.logging.set_verbosity(logging.INFO)
handlers = [ logging.FileHandler('results/main.log'), logging.StreamHandler(sys.stdout)]
logging.getLogger('tensorflow').handlers = handlers
# Data Pipeline
def parse_fn(line_words, line_tags):
    """Encodes words into bytes for tensor

    :param line_words: one line with words (aka sentences) with space between each word/token
    :param line_tags: one line of tags (one tag per word in line_words)
    :return: (list of encoded words, len(words)), list of encoded tags
    """
    encoded_words = [token.encode() for token in line_words.strip().split()]
    encoded_tags = [tag.encode() for tag in line_tags.strip().split()]
    n_words, n_tags = len(encoded_words), len(encoded_tags)
    # each word needs exactly one tag; anything else means misaligned files
    assert n_words == n_tags, "Number of words {} and Number of tags must be the same {}".format(n_words, n_tags)
    return (encoded_words, n_words), encoded_tags
def generator_fn(words_file, tags_file):
    """Enumerator to enumerate through words_file and associated tags_file one line at a time

    :param words_file: file path of the words file (one sentence per line)
    :param tags_file: file path of tags file (tags corresponding to words file)
    :return: generator yielding (words, len(words)), tags one line at a time from the input files.
    """
    with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:
        # map pairs each sentence line with its tag line (stops at the shorter file,
        # exactly as zip would) and parses them lazily
        yield from map(parse_fn, f_words, f_tags)
# MASKED: input_fn function (lines 48-74)
def model_fn(features, labels, mode, params):
    """
    Builds the BiLSTM-CRF sequence-tagging graph for a tf.estimator.Estimator.

    :param features: words from sentence and number of words per sentence
    :param labels: One tag per word
    :param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL
    :param params: dictionary of hyper parameters for the model
    :return: a tf.estimator.EstimatorSpec configured for the given mode
    """
    # For serving, features are a bit different
    if isinstance(features, dict):
        features = features['words'], features['nwords']
    # Read vocab_words_file, vocab_tags_file, features
    words, nwords = features
    training = (mode == tf.estimator.ModeKeys.TRAIN)
    vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets = params['num_oov_buckets'])
    '''
    If the file contains the following:
    B-LOC
    B-PER
    O
    I-LOC
    then indices = [0, 1, 3] and num_tags = 4
    Open Question: The special treatment of tag indices is probably needed for microavg metrics. Why though?
    '''
    with Path(params['vocab_tags_file']).open('r') as f:
        indices = [idx for idx, tag in enumerate(f) if tag.strip() != 'O']
        num_tags = len(indices) + 1
    # Word Embeddings
    # remember - as per the parse function "words" is a python list of byte-encoded tokens
    word_ids = vocab_words.lookup(words)
    glove = np.load(params['glove'])['embeddings']
    # append an all-zero row: the embedding used for out-of-vocabulary ids
    glove = np.vstack([glove, [[0.]*params['dim']]])
    variable = tf.Variable(glove, dtype=tf.float32, trainable=False)
    embeddings = tf.nn.embedding_lookup(variable, word_ids)
    dropout = params['dropout']
    embeddings = tf.layers.dropout(embeddings, rate = dropout, training = training)
    # LSTM CRF
    # fused LSTM cells expect time-major input: [time_len, batch_size, dim]
    time_major = tf.transpose(embeddings, perm = [1, 0, 2])
    lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
    lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
    lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
    """
    Any LSTM Cell returns two things: Cell Output (h) and Cell State (c)
    Following this, lstm_fw or lstm_bw each return a pair containing:
    Cell Output: A 3-D tensor of shape [time_len, batch_size, output_size]
    Final state: a tuple (cell_state, output) produced by the last LSTM Cell in the sequence.
    """
    output_fw,_ = lstm_cell_fw(time_major, dtype = tf.float32, sequence_length = nwords)
    output_bw,_ = lstm_cell_bw(time_major, dtype = tf.float32, sequence_length = nwords)
    output = tf.concat([output_fw, output_bw], axis=-1)
    # back to batch-major [batch_size, time_len, 2*lstm_size]
    output = tf.transpose(output, perm=[1, 0, 2])
    output = tf.layers.dropout(output, rate=dropout, training=training)
    # CRf
    logits = tf.layers.dense(output, num_tags)
    crf_params = tf.get_variable('crf', shape = [num_tags, num_tags], dtype = tf.float32)
    pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords) # pred_ids = A [batch_size, max_seq_len] matrix, with dtype tf.int32.
    # Prediction mode
    if mode == tf.estimator.ModeKeys.PREDICT:
        reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])
        pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
        predictions = {'pred_ids': pred_ids, 'tags': pred_strings}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # Loss
    vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])
    label_ids = vocab_tags.lookup(labels)
    """
    logits are the same thing as unary potentials,
    checkout https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html look for scores s[i]
    """
    log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)
    loss = tf.reduce_mean(-log_likelihood)
    # metrics
    # mask out padding positions so they don't contribute to the metrics
    weights = tf.sequence_mask(nwords)
    metrics = {
        'acc': tf.metrics.accuracy(label_ids, pred_ids, weights),
        'precision': precision(label_ids, pred_ids, num_tags, indices, weights), # indices indicate non-null classes
        'recall': recall(label_ids, pred_ids, num_tags, indices, weights),
        'f1': f1(label_ids, pred_ids, num_tags, indices, weights),
    }
    for metric_name, op in metrics.items():
        tf.summary.scalar(metric_name, op[1])
    # Evaluation Mode or training mode
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss = loss, eval_metric_ops = metrics )
    elif mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())
        return tf.estimator.EstimatorSpec(mode, loss = loss, train_op = train_op)
def fwords(name):
    """Returns the path (as str) to the '<name>.words.txt' file under DATADIR."""
    filename = '{}.words.txt'.format(name)
    return str(Path(DATADIR, filename))
def ftags(name):
    """Returns the path (as str) to the '<name>.tags.txt' file under DATADIR."""
    filename = '{}.tags.txt'.format(name)
    return str(Path(DATADIR, filename))
# Write predictions to file
def write_predictions(name, estimator):
    """Writes gold and predicted tags for split `name` to results/score/<name>.preds.txt.

    Each output line is b'word gold_tag predicted_tag'; sentences are separated
    by blank lines (file is opened in binary mode since tokens are bytes).

    :param name: dataset split name, e.g. 'train', 'testa' or 'testb'
    :param estimator: a trained tf.estimator.Estimator built from model_fn
    """
    Path('results/score').mkdir(parents=True, exist_ok=True)
    with Path('results/score/{}.preds.txt'.format(name)).open('wb') as f:
        test_inpf = functools.partial(input_fn, fwords(name), ftags(name))
        golds_gen = generator_fn(fwords(name), ftags(name))
        preds_gen = estimator.predict(test_inpf)
        # pair each gold sentence with the model's prediction for it
        for golds, preds in zip(golds_gen, preds_gen):
            ((words, _), tags) = golds
            for word, tag, tag_pred in zip(words, tags, preds['tags']):
                f.write(b' '.join([word, tag, tag_pred]) + b'\n')
            f.write(b'\n')
if __name__ == '__main__':
    # Params
    params = {
        'dim': 300,
        'dropout': 0.5,
        'num_oov_buckets': 1,
        'epochs': 25,
        'batch_size': 20,
        'buffer': 15000,
        'lstm_size': 100,
        'vocab_words_file': str(Path(DATADIR, 'vocab.words.txt')),
        'vocab_chars_file': str(Path(DATADIR, 'vocab.chars.txt')),
        'vocab_tags_file': str(Path(DATADIR, 'vocab.tags.txt')),
        'glove': str(Path(DATADIR, 'glove.npz'))
    }
    # persist the hyperparameters next to the results for reproducibility
    with Path('results/params.json').open('w') as f:
        json.dump(params, f, indent=4, sort_keys=True)
    print('Done writing params to disk')
    # Run configuration and estimator
    cfg = tf.estimator.RunConfig(save_checkpoints_secs=120)
    estimator = tf.estimator.Estimator(model_fn, 'results/model', cfg, params)
    print('Done creating estimator spec')
    # Defining our input functions
    train_inpf = functools.partial(input_fn, fwords('train'), ftags('train'), params, shuffle_and_repeat=True)
    eval_inpf = functools.partial(input_fn, fwords('testa'), ftags('testa'))
    # Create an early stopping hook
    Path(estimator.eval_dir()).mkdir(parents=True, exist_ok=True)
    """
    Ref: https://stackoverflow.com/questions/47137061/early-stopping-with-tf-estimator-how
    The parameters for stop_if_no_decrease_hook are as follows:
    tf.contrib.estimator.stop_if_no_decrease_hook(
        estimator,
        metric_name='loss',
        max_steps_without_decrease=1000,
        min_steps=100)
    """
    # stop training if eval 'f1' has not increased for 500 steps
    # (checked every 120 s, only after at least 8000 steps)
    hook = tf.contrib.estimator.stop_if_no_increase_hook(estimator, 'f1', 500, min_steps=8000, run_every_secs=120)
    train_spec = tf.estimator.TrainSpec(input_fn = train_inpf, hooks = [hook])
    eval_spec = tf.estimator.EvalSpec(input_fn = eval_inpf, throttle_secs = 120) # Evaluate every 120 seconds
    print('Done creating train and eval spec')
    # Train with early stopping
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    print('Done training and evaluation')
    # dump gold + predicted tags for every split with the trained model
    for name in ['train', 'testa', 'testb']:
        write_predictions(name, estimator)
|
def input_fn(words_file, tags_file, params = None, shuffle_and_repeat = False):
    """Creates tensorflow dataset using the generator_fn

    :param words_file: file path of the words file (one sentence per line)
    :param tags_file: file path of tags file (tags corresponding to words file)
    :param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'
    :param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)
    :return: instance of tf.data.Dataset
    """
    params = params if params is not None else {}
    # shapes are analogous to (list of encoded words, len(words)), list of encoded tags
    shapes = (([None], ()), [None])
    types = ((tf.string, tf.int32), tf.string)
    defaults = (('<pad>', 0), 'O')
    generator = functools.partial(generator_fn, words_file, tags_file)
    dataset = tf.data.Dataset.from_generator(generator, output_shapes = shapes, output_types = types)
    if shuffle_and_repeat:
        dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])
    # BUG FIX: the original line ended with a stray backslash (line
    # continuation) that glued `return dataset` onto this statement and made
    # the function a SyntaxError.
    dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)
    return dataset
| 48
| 74
|
# reimplementation of https://github.com/guillaumegenthial/tf_ner/blob/master/models/lstm_crf/main.py
import functools
import json
import logging
from pathlib import Path
import sys
import numpy as np
import tensorflow as tf
# tf.enable_eager_execution()
from tf_metrics import precision, recall, f1
DATADIR = "../../../data/conll/"
# Setup Logging
Path('results').mkdir(exist_ok=True)
tf.logging.set_verbosity(logging.INFO)
handlers = [ logging.FileHandler('results/main.log'), logging.StreamHandler(sys.stdout)]
logging.getLogger('tensorflow').handlers = handlers
# Data Pipeline
def parse_fn(line_words, line_tags):
"""Encodes words into bytes for tensor
:param line_words: one line with words (aka sentences) with space between each word/token
:param line_tags: one line of tags (one tag per word in line_words)
:return: (list of encoded words, len(words)), list of encoded tags
"""
words = [w.encode() for w in line_words.strip().split()]
tags = [t.encode() for t in line_tags.strip().split()]
assert len(words) == len(tags), "Number of words {} and Number of tags must be the same {}".format(len(words), len(tags))
return (words, len(words)), tags
def generator_fn(words_file, tags_file):
"""Enumerator to enumerate through words_file and associated tags_file one line at a time
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
:return enumerator that enumerates over the format (words, len(words)), tags one line at a time from input files.
"""
with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:
for line_words, line_tags in zip(f_words, f_tags):
yield parse_fn(line_words, line_tags)
def input_fn(words_file, tags_file, params = None, shuffle_and_repeat = False):
    """Creates tensorflow dataset using the generator_fn

    :param words_file: file path of the words file (one sentence per line)
    :param tags_file: file path of tags file (tags corresponding to words file)
    :param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'
    :param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)
    :return: instance of tf.data.Dataset
    """
    params = params if params is not None else {}
    # shapes are analogous to (list of encoded words, len(words)), list of encoded tags
    shapes = (([None], ()), [None])
    types = ((tf.string, tf.int32), tf.string)
    defaults = (('<pad>', 0), 'O')
    generator = functools.partial(generator_fn, words_file, tags_file)
    dataset = tf.data.Dataset.from_generator(generator, output_shapes = shapes, output_types = types)
    if shuffle_and_repeat:
        dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])
    # BUG FIX: the original line ended with a stray backslash (line
    # continuation) that glued `return dataset` onto this statement and made
    # the function a SyntaxError.
    dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)
    return dataset
def model_fn(features, labels, mode, params):
"""
:param features: words from sentence and number of words per sentence
:param labels: One tag per word
:param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL
:param params: dictionary of hyper parameters for the model
:return:
"""
# For serving, features are a bit different
if isinstance(features, dict):
features = features['words'], features['nwords']
# Read vocab_words_file, vocab_tags_file, features
words, nwords = features
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets = params['num_oov_buckets'])
'''
If the file contains the following:
B-LOC
B-PER
O
I-LOC
then indices = [0, 1, 3] and num_tags = 4
Open Question: The special treatment of tag indices is probably needed for microavg metrics. Why though?
'''
with Path(params['vocab_tags_file']).open('r') as f:
indices = [idx for idx, tag in enumerate(f) if tag.strip() != 'O']
num_tags = len(indices) + 1
# Word Embeddings
# remember - as per the parse function "words" is a python list of
word_ids = vocab_words.lookup(words)
glove = np.load(params['glove'])['embeddings']
glove = np.vstack([glove, [[0.]*params['dim']]])
variable = tf.Variable(glove, dtype=tf.float32, trainable=False)
embeddings = tf.nn.embedding_lookup(variable, word_ids)
dropout = params['dropout']
embeddings = tf.layers.dropout(embeddings, rate = dropout, training = training)
# LSTM CRF
time_major = tf.transpose(embeddings, perm = [1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
"""
Any LSTM Cell returns two things: Cell Output (h) and Cell State (c)
Following this, lstm_fw or lstm_bw each return a pair containing:
Cell Output: A 3-D tensor of shape [time_len, batch_size, output_size]
Final state: a tuple (cell_state, output) produced by the last LSTM Cell in the sequence.
"""
output_fw,_ = lstm_cell_fw(time_major, dtype = tf.float32, sequence_length = nwords)
output_bw,_ = lstm_cell_bw(time_major, dtype = tf.float32, sequence_length = nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.transpose(output, perm=[1, 0, 2])
output = tf.layers.dropout(output, rate=dropout, training=training)
# CRf
logits = tf.layers.dense(output, num_tags)
crf_params = tf.get_variable('crf', shape = [num_tags, num_tags], dtype = tf.float32)
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords) # pred_ids = A [batch_size, max_seq_len] matrix, with dtype tf.int32.
# Prediction mode
if mode == tf.estimator.ModeKeys.PREDICT:
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
predictions = {'pred_ids': pred_ids, 'tags': pred_strings}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Loss
vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])
label_ids = vocab_tags.lookup(labels)
"""
logits are the same thing as unary potentials,
checkout https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html look for scores s[i]
"""
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)
loss = tf.reduce_mean(-log_likelihood)
# metrics
weights = tf.sequence_mask(nwords)
metrics = {
'acc': tf.metrics.accuracy(label_ids, pred_ids, weights),
'precision': precision(label_ids, pred_ids, num_tags, indices, weights), # indices indicate non-null classes
'recall': recall(label_ids, pred_ids, num_tags, indices, weights),
'f1': f1(label_ids, pred_ids, num_tags, indices, weights),
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
# Evaluation Mode or training mode
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss = loss, eval_metric_ops = metrics )
elif mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode, loss = loss, train_op = train_op)
def fwords(name):
return str(Path(DATADIR, '{}.words.txt'.format(name)))
def ftags(name):
return str(Path(DATADIR, '{}.tags.txt'.format(name)))
# Write predictions to file
def write_predictions(name, estimator):
Path('results/score').mkdir(parents=True, exist_ok=True)
with Path('results/score/{}.preds.txt'.format(name)).open('wb') as f:
test_inpf = functools.partial(input_fn, fwords(name), ftags(name))
golds_gen = generator_fn(fwords(name), ftags(name))
preds_gen = estimator.predict(test_inpf)
for golds, preds in zip(golds_gen, preds_gen):
((words, _), tags) = golds
for word, tag, tag_pred in zip(words, tags, preds['tags']):
f.write(b' '.join([word, tag, tag_pred]) + b'\n')
f.write(b'\n')
if __name__ == '__main__':
# Params
params = {
'dim': 300,
'dropout': 0.5,
'num_oov_buckets': 1,
'epochs': 25,
'batch_size': 20,
'buffer': 15000,
'lstm_size': 100,
'vocab_words_file': str(Path(DATADIR, 'vocab.words.txt')),
'vocab_chars_file': str(Path(DATADIR, 'vocab.chars.txt')),
'vocab_tags_file': str(Path(DATADIR, 'vocab.tags.txt')),
'glove': str(Path(DATADIR, 'glove.npz'))
}
with Path('results/params.json').open('w') as f:
json.dump(params, f, indent=4, sort_keys=True)
print('Done writing params to disk')
# Run configuration and estimator
cfg = tf.estimator.RunConfig(save_checkpoints_secs=120)
estimator = tf.estimator.Estimator(model_fn, 'results/model', cfg, params)
print('Done creating estimator spec')
# Defining our input functions
train_inpf = functools.partial(input_fn, fwords('train'), ftags('train'), params, shuffle_and_repeat=True)
eval_inpf = functools.partial(input_fn, fwords('testa'), ftags('testa'))
# Create an early stopping hook
Path(estimator.eval_dir()).mkdir(parents=True, exist_ok=True)
"""
Ref: https://stackoverflow.com/questions/47137061/early-stopping-with-tf-estimator-how
The parameters for stop_if_no_decrease_hook are as follows:
tf.contrib.estimator.stop_if_no_decrease_hook(
estimator,
metric_name='loss',
max_steps_without_decrease=1000,
min_steps=100)
"""
hook = tf.contrib.estimator.stop_if_no_increase_hook(estimator, 'f1', 500, min_steps=8000, run_every_secs=120)
train_spec = tf.estimator.TrainSpec(input_fn = train_inpf, hooks = [hook])
eval_spec = tf.estimator.EvalSpec(input_fn = eval_inpf, throttle_secs = 120) # Evaluate every 120 seconds
print('Done creating train and eval spec')
# Train with early stopping
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
print('Done training and evaluation')
for name in ['train', 'testa', 'testb']:
write_predictions(name, estimator)
|
chart_descriptions
|
:param chart: Chart: The alt chart to be used in finding max points
:param suffix: str: The assumption is that the charts have similar column names.
The census chart adds " Census" to the column names.
Make sure to include a space or underscore as appropriate
:return: str: Returns a multi-line string description of the results
|
from math import ceil
import datetime
from altair import Chart # type: ignore
import pandas as pd # type: ignore
import numpy as np
from .parameters import Parameters
from .utils import add_date_column
from .presentation import DATE_FORMAT
def new_admissions_chart(
    alt, projection_admits: pd.DataFrame, parameters: Parameters
) -> Chart:
    """Build an interactive line chart of projected daily admissions.

    :param alt: the altair module (passed in rather than imported here)
    :param projection_admits: frame with 'day', 'hospitalized', 'icu' and 'ventilated' columns
    :param parameters: model Parameters; uses n_days, max_y_axis and as_date
    :return: altair Chart of daily admissions per disposition
    """
    # the last 10 projected days are cut off from the plot
    plot_projection_days = parameters.n_days - 10
    max_y_axis = parameters.max_y_axis
    as_date = parameters.as_date
    y_scale = alt.Scale()
    if max_y_axis is not None:
        y_scale.domain = (0, max_y_axis)
    tooltip_dict = {False: "day", True: "date:T"}
    if as_date:
        projection_admits = add_date_column(projection_admits)
        x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
    else:
        x_kwargs = {"shorthand": "day", "title": "Days from today"}
    # TODO fix the fold to allow any number of dispositions
    # round admissions up to whole patients before plotting
    ceiled_admits = projection_admits.copy()
    ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)
    ceiled_admits.icu = np.ceil(ceiled_admits.icu)
    ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)
    return (
        alt.Chart(ceiled_admits.head(plot_projection_days))
        .transform_fold(fold=["hospitalized", "icu", "ventilated"])
        .mark_line(point=True)
        .encode(
            x=alt.X(**x_kwargs),
            y=alt.Y("value:Q", title="Daily admissions", scale=y_scale),
            color="key:N",
            tooltip=[
                tooltip_dict[as_date],
                alt.Tooltip("value:Q", format=".0f", title="Admissions"),
                "key:N",
            ],
        )
        .interactive()
    )
def admitted_patients_chart(
    alt, census: pd.DataFrame, parameters: Parameters
) -> Chart:
    """Build an interactive line chart of the projected census (occupied beds).

    :param alt: the altair module (passed in rather than imported here)
    :param census: frame with 'day', 'hospitalized', 'icu' and 'ventilated' columns
    :param parameters: model Parameters; uses n_days, max_y_axis and as_date
    :return: altair Chart of the census per disposition
    """
    # the last 10 projected days are cut off from the plot
    plot_projection_days = parameters.n_days - 10
    max_y_axis = parameters.max_y_axis
    as_date = parameters.as_date
    if as_date:
        census = add_date_column(census)
        x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
        idx = "date:T"
    else:
        x_kwargs = {"shorthand": "day", "title": "Days from today"}
        idx = "day"
    y_scale = alt.Scale()
    # CONSISTENCY FIX: compare against None (as the sibling chart builders do)
    # so an explicit max_y_axis of 0 is honored instead of silently ignored.
    if max_y_axis is not None:
        y_scale.domain = (0, max_y_axis)
    # TODO fix the fold to allow any number of dispositions
    return (
        alt.Chart(census.head(plot_projection_days))
        .transform_fold(fold=["hospitalized", "icu", "ventilated"])
        .mark_line(point=True)
        .encode(
            x=alt.X(**x_kwargs),
            y=alt.Y("value:Q", title="Census", scale=y_scale),
            color="key:N",
            tooltip=[
                idx,
                alt.Tooltip("value:Q", format=".0f", title="Census"),
                "key:N",
            ],
        )
        .interactive()
    )
def additional_projections_chart(
    alt, model, parameters
) -> Chart:
    """Build an interactive line chart of infected and recovered case volumes.

    :param alt: the altair module (passed in rather than imported here)
    :param model: model exposing raw_df with 'infected' and 'recovered' columns
    :param parameters: model Parameters; uses as_date and max_y_axis
    :return: altair Chart of infected/recovered case volume over time
    """
    # TODO use subselect of df_raw instead of creating a new df
    raw_df = model.raw_df
    dat = pd.DataFrame({
        "infected": raw_df.infected,
        "recovered": raw_df.recovered
    })
    dat["day"] = dat.index
    as_date = parameters.as_date
    max_y_axis = parameters.max_y_axis
    if as_date:
        dat = add_date_column(dat)
        x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
    else:
        x_kwargs = {"shorthand": "day", "title": "Days from today"}
    y_scale = alt.Scale()
    if max_y_axis is not None:
        y_scale.domain = (0, max_y_axis)
    return (
        alt.Chart(dat)
        .transform_fold(fold=["infected", "recovered"])
        .mark_line()
        .encode(
            x=alt.X(**x_kwargs),
            y=alt.Y("value:Q", title="Case Volume", scale=y_scale),
            tooltip=["key:N", "value:Q"],
            color="key:N",
        )
        .interactive()
    )
# MASKED: chart_descriptions function (lines 140-177)
|
def chart_descriptions(chart: Chart, labels, suffix: str = ""):
    """Build a multi-line text summary of when each disposition series peaks.

    :param chart: Chart: The alt chart to be used in finding max points
    :param labels: mapping from column name to its display label
    :param suffix: str: The assumption is that the charts have similar column names.
    The census chart adds " Census" to the column names.
    Make sure to include a space or underscore as appropriate
    :return: str: Returns a multi-line string description of the results
    """
    messages = []
    cols = ["hospitalized", "icu", "ventilated"]
    any_at_bound = False
    # charts carry either a 'date' or a 'day' x-column
    day = "date" if "date" in chart.data.columns else "day"
    for col in cols:
        peak_idx = chart.data[col].idxmax()
        # BUG FIX: the asterisk flag used to be sticky across columns, so once
        # one series peaked at the data's upper bound, every later series was
        # marked with '*' too. It is now evaluated per column.
        at_bound = peak_idx + 1 == len(chart.data)
        any_at_bound = any_at_bound or at_bound
        on = chart.data[day][peak_idx]
        if day == "date":
            on = datetime.datetime.strftime(on, "%b %d")  # todo: bring this to an optional arg / i18n
        else:
            on += 1  # 0 index issue
        messages.append(
            "{}{} peaks at {:,} on day {}{}".format(
                labels[col],
                suffix,
                ceil(chart.data[col].max()),
                on,
                "*" if at_bound else "",
            )
        )
    if any_at_bound:
        messages.append("_* The max is at the upper bound of the data, and therefore may not be the actual max_")
    return "\n\n".join(messages)
| 140
| 177
|
from math import ceil
import datetime
from altair import Chart # type: ignore
import pandas as pd # type: ignore
import numpy as np
from .parameters import Parameters
from .utils import add_date_column
from .presentation import DATE_FORMAT
def new_admissions_chart(
alt, projection_admits: pd.DataFrame, parameters: Parameters
) -> Chart:
"""docstring"""
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
tooltip_dict = {False: "day", True: "date:T"}
if as_date:
projection_admits = add_date_column(projection_admits)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
# TODO fix the fold to allow any number of dispositions
ceiled_admits = projection_admits.copy()
ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)
ceiled_admits.icu = np.ceil(ceiled_admits.icu)
ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)
return (
alt.Chart(ceiled_admits.head(plot_projection_days))
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Daily admissions", scale=y_scale),
color="key:N",
tooltip=[
tooltip_dict[as_date],
alt.Tooltip("value:Q", format=".0f", title="Admissions"),
"key:N",
],
)
.interactive()
)
def admitted_patients_chart(
    alt, census: pd.DataFrame, parameters: Parameters
) -> Chart:
    """Build an interactive line chart of the projected patient census.

    Args:
        alt: The altair module (injected to avoid a hard import here).
        census: Frame with ``hospitalized``, ``icu`` and ``ventilated``
            census columns plus a ``day`` column.
        parameters: Supplies ``n_days``, ``max_y_axis`` and ``as_date``.

    Returns:
        An interactive altair Chart with one line per disposition.
    """
    plot_projection_days = parameters.n_days - 10
    max_y_axis = parameters.max_y_axis
    as_date = parameters.as_date

    if as_date:
        census = add_date_column(census)
        x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
        idx = "date:T"
    else:
        x_kwargs = {"shorthand": "day", "title": "Days from today"}
        idx = "day"

    y_scale = alt.Scale()
    # Explicit None check (was a truthiness check) for consistency with
    # new_admissions_chart / additional_projections_chart in this module.
    if max_y_axis is not None:
        y_scale.domain = (0, max_y_axis)

    # TODO fix the fold to allow any number of dispositions
    return (
        alt.Chart(census.head(plot_projection_days))
        .transform_fold(fold=["hospitalized", "icu", "ventilated"])
        .mark_line(point=True)
        .encode(
            x=alt.X(**x_kwargs),
            y=alt.Y("value:Q", title="Census", scale=y_scale),
            color="key:N",
            tooltip=[
                idx,
                alt.Tooltip("value:Q", format=".0f", title="Census"),
                "key:N",
            ],
        )
        .interactive()
    )
def additional_projections_chart(
    alt, model, parameters
) -> Chart:
    """Build an interactive chart of infected and recovered case volume."""
    # TODO use subselect of df_raw instead of creating a new df
    source = model.raw_df
    dat = pd.DataFrame({"infected": source.infected, "recovered": source.recovered})
    dat["day"] = dat.index

    if parameters.as_date:
        dat = add_date_column(dat)
        x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
    else:
        x_kwargs = {"shorthand": "day", "title": "Days from today"}

    # Optionally clamp the y axis to a user-supplied ceiling.
    y_scale = alt.Scale()
    if parameters.max_y_axis is not None:
        y_scale.domain = (0, parameters.max_y_axis)

    folded = alt.Chart(dat).transform_fold(fold=["infected", "recovered"])
    return (
        folded.mark_line()
        .encode(
            x=alt.X(**x_kwargs),
            y=alt.Y("value:Q", title="Case Volume", scale=y_scale),
            tooltip=["key:N", "value:Q"],
            color="key:N",
        )
        .interactive()
    )
def chart_descriptions(chart: Chart, labels, suffix: str = ""):
    """Build a multi-line textual summary of where each series peaks.

    :param chart: Chart: The alt chart to be used in finding max points
    :param labels: mapping from column name to its display label
    :param suffix: str: The assumption is that the charts have similar column names.
               The census chart adds " Census" to the column names.
               Make sure to include a space or underscore as appropriate
    :return: str: Returns a multi-line string description of the results
    """
    messages = []
    cols = ["hospitalized", "icu", "ventilated"]
    any_at_bound = False
    day = "date" if "date" in chart.data.columns else "day"

    for col in cols:
        peak_idx = chart.data[col].idxmax()
        # Per-column flag (previously sticky across columns, wrongly starring
        # every series after the first one that peaked at the bound).
        at_bound = peak_idx + 1 == len(chart.data)
        any_at_bound = any_at_bound or at_bound

        on = chart.data[day][peak_idx]
        if day == "date":
            on = datetime.datetime.strftime(on, "%b %d")  # todo: bring this to an optional arg / i18n
        else:
            on += 1  # 0 index issue

        messages.append(
            "{}{} peaks at {:,} on day {}{}".format(
                labels[col],
                suffix,
                ceil(chart.data[col].max()),
                on,
                "*" if at_bound else "",
            )
        )

    if any_at_bound:
        messages.append("_* The max is at the upper bound of the data, and therefore may not be the actual max_")
    return "\n\n".join(messages)
|
batch_loss
|
Calculates the loss for the given batch.
Args:
batch_in: The batch.
Returns:
The loss of the given batch.
|
from __future__ import annotations
import copy
import logging
from collections import defaultdict
from pathlib import Path
from rasa.nlu.featurizers.featurizer import Featurizer
import numpy as np
import scipy.sparse
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.nlu.classifiers.classifier import IntentClassifier
import rasa.shared.utils.io
import rasa.utils.io as io_utils
import rasa.nlu.utils.bilou_utils as bilou_utils
from rasa.shared.constants import DIAGNOSTIC_DATA
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.utils import train_utils
from rasa.utils.tensorflow import rasa_layers
from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
from rasa.utils.tensorflow.model_data import (
RasaModelData,
FeatureSignature,
FeatureArray,
)
from rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
TEXT,
INTENT,
INTENT_RESPONSE_KEY,
ENTITIES,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
NO_ENTITY_TAG,
SPLIT_ENTITIES_BY_COMMA,
)
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.tensorflow.constants import (
LABEL,
IDS,
HIDDEN_LAYERS_SIZES,
RENORMALIZE_CONFIDENCES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
CONNECTION_DENSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
AUTO,
BALANCED,
CROSS_ENTROPY,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
CHECKPOINT_MODEL,
SEQUENCE,
SENTENCE,
SEQUENCE_LENGTH,
DENSE_DIMENSION,
MASK,
CONSTRAIN_SIMILARITIES,
MODEL_CONFIDENCE,
SOFTMAX,
)
logger = logging.getLogger(__name__)
# Key prefixes distinguishing sparse vs. dense feature origins in
# `_extract_features` output dictionaries.
SPARSE = "sparse"
DENSE = "dense"
# Model-data key/sub-key under which intent label ids are stored.
LABEL_KEY = LABEL
LABEL_SUB_KEY = IDS
# Entity tag levels this component can predict: entity type, role and group.
POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
@DefaultV1Recipe.register(
[
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
],
is_trainable=True,
)
class DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):
"""A multi-task model for intent classification and entity extraction.
DIET is Dual Intent and Entity Transformer.
The architecture is based on a transformer which is shared for both tasks.
A sequence of entity labels is predicted through a Conditional Random Field (CRF)
tagging layer on top of the transformer output sequence corresponding to the
input sequence of tokens. The transformer output for the ``__CLS__`` token and
intent labels are embedded into a single semantic vector space. We use the
dot-product loss to maximize the similarity with the target label and minimize
similarities with negative samples.
"""
@classmethod
def required_components(cls) -> List[Type]:
"""Components that should be included in the pipeline before this component."""
return [Featurizer]
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        """The component's default config (see parent class for full docstring).

        Returns:
            A dictionary mapping configuration keys (constants from
            `rasa.utils.tensorflow.constants`) to their default values.
        """
        # please make sure to update the docs when changing a default parameter
        return {
            # ## Architecture of the used neural network
            # Hidden layer sizes for layers before the embedding layers for user message
            # and labels.
            # The number of hidden layers is equal to the length of the corresponding
            # list.
            HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},
            # Whether to share the hidden layer weights between user message and labels.
            SHARE_HIDDEN_LAYERS: False,
            # Number of units in transformer
            TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,
            # Number of transformer layers
            NUM_TRANSFORMER_LAYERS: 2,
            # Number of attention heads in transformer
            NUM_HEADS: 4,
            # If 'True' use key relative embeddings in attention
            KEY_RELATIVE_ATTENTION: False,
            # If 'True' use value relative embeddings in attention
            VALUE_RELATIVE_ATTENTION: False,
            # Max position for relative embeddings. Only in effect if key- or value
            # relative attention are turned on
            MAX_RELATIVE_POSITION: 5,
            # Use a unidirectional or bidirectional encoder.
            UNIDIRECTIONAL_ENCODER: False,
            # ## Training parameters
            # Initial and final batch sizes:
            # Batch size will be linearly increased for each epoch.
            BATCH_SIZES: [64, 256],
            # Strategy used when creating batches.
            # Can be either 'sequence' or 'balanced'.
            BATCH_STRATEGY: BALANCED,
            # Number of epochs to train
            EPOCHS: 300,
            # Set random seed to any 'int' to get reproducible results
            RANDOM_SEED: None,
            # Initial learning rate for the optimizer
            LEARNING_RATE: 0.001,
            # ## Parameters for embeddings
            # Dimension size of embedding vectors
            EMBEDDING_DIMENSION: 20,
            # Dense dimension to use for sparse features.
            DENSE_DIMENSION: {TEXT: 128, LABEL: 20},
            # Default dimension to use for concatenating sequence and sentence features.
            CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},
            # The number of incorrect labels. The algorithm will minimize
            # their similarity to the user input during training.
            NUM_NEG: 20,
            # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
            SIMILARITY_TYPE: AUTO,
            # The type of the loss function, either 'cross_entropy' or 'margin'.
            LOSS_TYPE: CROSS_ENTROPY,
            # Number of top intents for which confidences should be reported.
            # Set to 0 if confidences for all intents should be reported.
            RANKING_LENGTH: LABEL_RANKING_LENGTH,
            # Indicates how similar the algorithm should try to make embedding vectors
            # for correct labels.
            # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
            MAX_POS_SIM: 0.8,
            # Maximum negative similarity for incorrect labels.
            # Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
            MAX_NEG_SIM: -0.4,
            # If 'True' the algorithm only minimizes maximum similarity over
            # incorrect intent labels, used only if 'loss_type' is set to 'margin'.
            USE_MAX_NEG_SIM: True,
            # If 'True' scale loss inverse proportionally to the confidence
            # of the correct prediction
            SCALE_LOSS: False,
            # ## Regularization parameters
            # The scale of regularization
            REGULARIZATION_CONSTANT: 0.002,
            # The scale of how important is to minimize the maximum similarity
            # between embeddings of different labels,
            # used only if 'loss_type' is set to 'margin'.
            NEGATIVE_MARGIN_SCALE: 0.8,
            # Dropout rate for encoder
            DROP_RATE: 0.2,
            # Dropout rate for attention
            DROP_RATE_ATTENTION: 0,
            # Fraction of trainable weights in internal layers.
            CONNECTION_DENSITY: 0.2,
            # If 'True' apply dropout to sparse input tensors
            SPARSE_INPUT_DROPOUT: True,
            # If 'True' apply dropout to dense input tensors
            DENSE_INPUT_DROPOUT: True,
            # ## Evaluation parameters
            # How often calculate validation accuracy.
            # Small values may hurt performance.
            EVAL_NUM_EPOCHS: 20,
            # How many examples to use for hold out validation set
            # Large values may hurt performance, e.g. model accuracy.
            # Set to 0 for no validation.
            EVAL_NUM_EXAMPLES: 0,
            # ## Model config
            # If 'True' intent classification is trained and intent predicted.
            INTENT_CLASSIFICATION: True,
            # If 'True' named entity recognition is trained and entities predicted.
            ENTITY_RECOGNITION: True,
            # If 'True' random tokens of the input message will be masked and the model
            # should predict those tokens.
            MASKED_LM: False,
            # 'BILOU_flag' determines whether to use BILOU tagging or not.
            # If set to 'True' labelling is more rigorous, however more
            # examples per entity are required.
            # Rule of thumb: you should have more than 100 examples per entity.
            BILOU_FLAG: True,
            # If you want to use tensorboard to visualize training and validation
            # metrics, set this option to a valid output directory.
            TENSORBOARD_LOG_DIR: None,
            # Define when training metrics for tensorboard should be logged.
            # Either after every epoch or for every training step.
            # Valid values: 'epoch' and 'batch'
            TENSORBOARD_LOG_LEVEL: "epoch",
            # Perform model checkpointing
            CHECKPOINT_MODEL: False,
            # Specify what features to use as sequence and sentence features
            # By default all features in the pipeline are used.
            FEATURIZERS: [],
            # Split entities by comma, this makes sense e.g. for a list of ingredients
            # in a recipie, but it doesn't make sense for the parts of an address
            SPLIT_ENTITIES_BY_COMMA: True,
            # If 'True' applies sigmoid on all similarity terms and adds
            # it to the loss function to ensure that similarity values are
            # approximately bounded. Used inside cross-entropy loss only.
            CONSTRAIN_SIMILARITIES: False,
            # Model confidence to be returned during inference. Currently, the only
            # possible value is `softmax`.
            MODEL_CONFIDENCE: SOFTMAX,
            # Determines whether the confidences of the chosen top intents should be
            # renormalized so that they sum up to 1. By default, we do not renormalize
            # and return the confidences for the top intents as is.
            # Note that renormalization only makes sense if confidences are generated
            # via `softmax`.
            RENORMALIZE_CONFIDENCES: False,
        }
    def __init__(
        self,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        index_label_id_mapping: Optional[Dict[int, Text]] = None,
        entity_tag_specs: Optional[List[EntityTagSpec]] = None,
        model: Optional[RasaModel] = None,
        sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,
    ) -> None:
        """Declare instance variables with default values.

        Args:
            config: Component configuration (see `get_default_config`).
            model_storage: Storage abstraction the model is persisted to/loaded from.
            resource: Resource under which this component's model is stored.
            execution_context: Graph execution context (supplies `is_finetuning`).
            index_label_id_mapping: Maps label indices back to label names.
            entity_tag_specs: Tag specs for entity recognition, if already known.
            model: A pre-instantiated model (used when loading / finetuning).
            sparse_feature_sizes: Sparse feature sizes observed during training.
        """
        # Warn users who rely on the implicit default number of epochs.
        if EPOCHS not in config:
            rasa.shared.utils.io.raise_warning(
                f"Please configure the number of '{EPOCHS}' in your configuration file."
                f" We will change the default value of '{EPOCHS}' in the future to 1. "
            )
        self.component_config = config
        self._model_storage = model_storage
        self._resource = resource
        self._execution_context = execution_context
        # Validates and normalizes `self.component_config` in place.
        self._check_config_parameters()
        # transform numbers to labels
        self.index_label_id_mapping = index_label_id_mapping or {}
        self._entity_tag_specs = entity_tag_specs
        self.model = model
        # Directory for intermediate checkpoints; only set when checkpointing is on.
        self.tmp_checkpoint_dir: Optional[Path] = None
        if self.component_config[CHECKPOINT_MODEL]:
            self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
        # Both populated later by `preprocess_train_data` / `train`.
        self._label_data: Optional[RasaModelData] = None
        self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None
        self.split_entities_config = rasa.utils.train_utils.init_split_entities(
            self.component_config[SPLIT_ENTITIES_BY_COMMA],
            SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
        )
        self.finetune_mode = self._execution_context.is_finetuning
        self._sparse_feature_sizes = sparse_feature_sizes
# init helpers
def _check_masked_lm(self) -> None:
if (
self.component_config[MASKED_LM]
and self.component_config[NUM_TRANSFORMER_LAYERS] == 0
):
raise ValueError(
f"If number of transformer layers is 0, "
f"'{MASKED_LM}' option should be 'False'."
)
def _check_share_hidden_layers_sizes(self) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
first_hidden_layer_sizes = next(
iter(self.component_config[HIDDEN_LAYERS_SIZES].values())
)
# check that all hidden layer sizes are the same
identical_hidden_layer_sizes = all(
current_hidden_layer_sizes == first_hidden_layer_sizes
for current_hidden_layer_sizes in self.component_config[
HIDDEN_LAYERS_SIZES
].values()
)
if not identical_hidden_layer_sizes:
raise ValueError(
f"If hidden layer weights are shared, "
f"{HIDDEN_LAYERS_SIZES} must coincide."
)
    def _check_config_parameters(self) -> None:
        """Validates the component config and normalizes it in place."""
        self.component_config = train_utils.check_deprecated_options(
            self.component_config
        )
        self._check_masked_lm()
        self._check_share_hidden_layers_sizes()
        # NOTE(review): call order looks intentional — confidence type is
        # resolved before validation and similarity-type resolution; confirm
        # against train_utils before reordering.
        self.component_config = train_utils.update_confidence_type(
            self.component_config
        )
        train_utils.validate_configuration_settings(self.component_config)
        self.component_config = train_utils.update_similarity_type(
            self.component_config
        )
        self.component_config = train_utils.update_evaluation_parameters(
            self.component_config
        )
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> DIETClassifier:
"""Creates a new untrained component (see parent class for full docstring)."""
return cls(config, model_storage, resource, execution_context)
@property
def label_key(self) -> Optional[Text]:
"""Return key if intent classification is activated."""
return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@property
def label_sub_key(self) -> Optional[Text]:
"""Return sub key if intent classification is activated."""
return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None
    @staticmethod
    def model_class() -> Type[RasaModel]:
        """Returns the concrete model class this component trains."""
        # NOTE(review): `DIET` is presumably defined further down in this
        # module (not visible here) — confirm before relying on it.
        return DIET
# training data helpers:
@staticmethod
def _label_id_index_mapping(
training_data: TrainingData, attribute: Text
) -> Dict[Text, int]:
"""Create label_id dictionary."""
distinct_label_ids = {
example.get(attribute) for example in training_data.intent_examples
} - {None}
return {
label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))
}
@staticmethod
def _invert_mapping(mapping: Dict) -> Dict:
return {value: key for key, value in mapping.items()}
def _create_entity_tag_specs(
self, training_data: TrainingData
) -> List[EntityTagSpec]:
"""Create entity tag specifications with their respective tag id mappings."""
_tag_specs = []
for tag_name in POSSIBLE_TAGS:
if self.component_config[BILOU_FLAG]:
tag_id_index_mapping = bilou_utils.build_tag_id_dict(
training_data, tag_name
)
else:
tag_id_index_mapping = self._tag_id_index_mapping_for(
tag_name, training_data
)
if tag_id_index_mapping:
_tag_specs.append(
EntityTagSpec(
tag_name=tag_name,
tags_to_ids=tag_id_index_mapping,
ids_to_tags=self._invert_mapping(tag_id_index_mapping),
num_tags=len(tag_id_index_mapping),
)
)
return _tag_specs
@staticmethod
def _tag_id_index_mapping_for(
tag_name: Text, training_data: TrainingData
) -> Optional[Dict[Text, int]]:
"""Create mapping from tag name to id."""
if tag_name == ENTITY_ATTRIBUTE_ROLE:
distinct_tags = training_data.entity_roles
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}
if not distinct_tags:
return None
tag_id_dict = {
tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)
}
# NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index
# needed for correct prediction for padding
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
@staticmethod
def _find_example_for_label(
label: Text, examples: List[Message], attribute: Text
) -> Optional[Message]:
for ex in examples:
if ex.get(attribute) == label:
return ex
return None
def _check_labels_features_exist(
self, labels_example: List[Message], attribute: Text
) -> bool:
"""Checks if all labels have features set."""
return all(
label_example.features_present(
attribute, self.component_config[FEATURIZERS]
)
for label_example in labels_example
)
    def _extract_features(
        self, message: Message, attribute: Text
    ) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:
        """Collects sparse/dense, sequence/sentence features for one attribute.

        Args:
            message: The message to read features from.
            attribute: Message attribute (e.g. TEXT, INTENT) to featurize.

        Returns:
            Mapping from "<sparse|dense>_<sequence|sentence>" keys to feature
            matrices; keys appear only for feature types the message has.

        Raises:
            ValueError: If sparse and dense features disagree on length.
        """
        (
            sparse_sequence_features,
            sparse_sentence_features,
        ) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])
        dense_sequence_features, dense_sentence_features = message.get_dense_features(
            attribute, self.component_config[FEATURIZERS]
        )
        # Sparse and dense variants must cover the same number of positions.
        if dense_sequence_features is not None and sparse_sequence_features is not None:
            if (
                dense_sequence_features.features.shape[0]
                != sparse_sequence_features.features.shape[0]
            ):
                raise ValueError(
                    f"Sequence dimensions for sparse and dense sequence features "
                    f"don't coincide in '{message.get(TEXT)}'"
                    f"for attribute '{attribute}'."
                )
        if dense_sentence_features is not None and sparse_sentence_features is not None:
            if (
                dense_sentence_features.features.shape[0]
                != sparse_sentence_features.features.shape[0]
            ):
                raise ValueError(
                    f"Sequence dimensions for sparse and dense sentence features "
                    f"don't coincide in '{message.get(TEXT)}'"
                    f"for attribute '{attribute}'."
                )
        # If we don't use the transformer and we don't want to do entity recognition,
        # to speed up training take only the sentence features as feature vector.
        # We would not make use of the sequence anyway in this setup. Carrying over
        # those features to the actual training process takes quite some time.
        if (
            self.component_config[NUM_TRANSFORMER_LAYERS] == 0
            and not self.component_config[ENTITY_RECOGNITION]
            and attribute not in [INTENT, INTENT_RESPONSE_KEY]
        ):
            sparse_sequence_features = None
            dense_sequence_features = None
        # Only include keys for feature types that are actually present.
        out = {}
        if sparse_sentence_features is not None:
            out[f"{SPARSE}_{SENTENCE}"] = sparse_sentence_features.features
        if sparse_sequence_features is not None:
            out[f"{SPARSE}_{SEQUENCE}"] = sparse_sequence_features.features
        if dense_sentence_features is not None:
            out[f"{DENSE}_{SENTENCE}"] = dense_sentence_features.features
        if dense_sequence_features is not None:
            out[f"{DENSE}_{SEQUENCE}"] = dense_sequence_features.features
        return out
    def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
        """Checks if features have same dimensionality if hidden layers are shared."""
        if self.component_config.get(SHARE_HIDDEN_LAYERS):
            num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
            num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
            num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
            num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
            # Chained comparison: `0 < a != b > 0` reads as
            # `0 < a and a != b and b > 0`, i.e. both sides present but unequal.
            if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (
                0 < num_text_sequence_features != num_label_sequence_features > 0
            ):
                raise ValueError(
                    "If embeddings are shared text features and label features "
                    "must coincide. Check the output dimensions of previous components."
                )
def _extract_labels_precomputed_features(
self, label_examples: List[Message], attribute: Text = INTENT
) -> Tuple[List[FeatureArray], List[FeatureArray]]:
"""Collects precomputed encodings."""
features = defaultdict(list)
for e in label_examples:
label_features = self._extract_features(e, attribute)
for feature_key, feature_value in label_features.items():
features[feature_key].append(feature_value)
sequence_features = []
sentence_features = []
for feature_name, feature_value in features.items():
if SEQUENCE in feature_name:
sequence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
else:
sentence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
return sequence_features, sentence_features
@staticmethod
def _compute_default_label_features(
labels_example: List[Message],
) -> List[FeatureArray]:
"""Computes one-hot representation for the labels."""
logger.debug("No label features found. Computing default label features.")
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
# add sequence dimension to one-hot labels
return [
FeatureArray(
np.array([np.expand_dims(a, 0) for a in eye_matrix]),
number_of_dimensions=3,
)
]
    def _create_label_data(
        self,
        training_data: TrainingData,
        label_id_dict: Dict[Text, int],
        attribute: Text,
    ) -> RasaModelData:
        """Create matrix with label_ids encoded in rows as bag of words.

        Find a training example for each label and get the encoded features
        from the corresponding Message object.
        If the features are already computed, fetch them from the message object
        else compute a one hot encoding for the label as the feature vector.
        """
        # Collect one example for each label
        labels_idx_examples = []
        for label_name, idx in label_id_dict.items():
            label_example = self._find_example_for_label(
                label_name, training_data.intent_examples, attribute
            )
            labels_idx_examples.append((idx, label_example))
        # Sort the list of tuples based on label_idx
        labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])
        labels_example = [example for (_, example) in labels_idx_examples]
        # Collect features, precomputed if they exist, else compute on the fly
        if self._check_labels_features_exist(labels_example, attribute):
            (
                sequence_features,
                sentence_features,
            ) = self._extract_labels_precomputed_features(labels_example, attribute)
        else:
            sequence_features = None
            sentence_features = self._compute_default_label_features(labels_example)
        label_data = RasaModelData()
        label_data.add_features(LABEL, SEQUENCE, sequence_features)
        label_data.add_features(LABEL, SENTENCE, sentence_features)
        # At least one of sequence/sentence label features must exist.
        if label_data.does_feature_not_exist(
            LABEL, SENTENCE
        ) and label_data.does_feature_not_exist(LABEL, SEQUENCE):
            raise ValueError(
                "No label features are present. Please check your configuration file."
            )
        label_ids = np.array([idx for (idx, _) in labels_idx_examples])
        # explicitly add last dimension to label_ids
        # to track correctly dynamic sequences
        label_data.add_features(
            LABEL_KEY,
            LABEL_SUB_KEY,
            [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
        )
        label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
        return label_data
def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:
feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)
all_label_features = feature_arrays[0]
return [
FeatureArray(
np.array([all_label_features[label_id] for label_id in label_ids]),
number_of_dimensions=all_label_features.number_of_dimensions,
)
]
    def _create_model_data(
        self,
        training_data: List[Message],
        label_id_dict: Optional[Dict[Text, int]] = None,
        label_attribute: Optional[Text] = None,
        training: bool = True,
    ) -> RasaModelData:
        """Prepare data for training and create a RasaModelData object.

        Args:
            training_data: Messages to featurize (a single message at inference).
            label_id_dict: Maps label names to indices (training only).
            label_attribute: Attribute holding the label (e.g. INTENT), if any.
            training: Whether this is a training (vs. prediction) call.

        Returns:
            The assembled RasaModelData; empty if no usable examples remain.
        """
        from rasa.utils.tensorflow import model_data_utils

        attributes_to_consider = [TEXT]
        if training and self.component_config[INTENT_CLASSIFICATION]:
            # we don't have any intent labels during prediction, just add them during
            # training
            attributes_to_consider.append(label_attribute)
        if (
            training
            and self.component_config[ENTITY_RECOGNITION]
            and self._entity_tag_specs
        ):
            # Add entities as labels only during training and only if there was
            # training data added for entities with DIET configured to predict entities.
            attributes_to_consider.append(ENTITIES)
        if training and label_attribute is not None:
            # only use those training examples that have the label_attribute set
            # during training
            training_data = [
                example for example in training_data if label_attribute in example.data
            ]
        # Drop messages without any TEXT features from the configured featurizers.
        training_data = [
            message
            for message in training_data
            if message.features_present(
                attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)
            )
        ]
        if not training_data:
            # no training data are present to train
            return RasaModelData()
        (
            features_for_examples,
            sparse_feature_sizes,
        ) = model_data_utils.featurize_training_examples(
            training_data,
            attributes_to_consider,
            entity_tag_specs=self._entity_tag_specs,
            featurizers=self.component_config[FEATURIZERS],
            bilou_tagging=self.component_config[BILOU_FLAG],
        )
        attribute_data, _ = model_data_utils.convert_to_data_format(
            features_for_examples, consider_dialogue_dimension=False
        )
        model_data = RasaModelData(
            label_key=self.label_key, label_sub_key=self.label_sub_key
        )
        model_data.add_data(attribute_data)
        model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
        # Current implementation doesn't yet account for updating sparse
        # feature sizes of label attributes. That's why we remove them.
        sparse_feature_sizes = self._remove_label_sparse_feature_sizes(
            sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute
        )
        model_data.add_sparse_feature_sizes(sparse_feature_sizes)
        self._add_label_features(
            model_data, training_data, label_attribute, label_id_dict, training
        )
        # make sure all keys are in the same order during training and prediction
        # as we rely on the order of key and sub-key when constructing the actual
        # tensors from the model data
        model_data.sort()
        return model_data
@staticmethod
def _remove_label_sparse_feature_sizes(
sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
label_attribute: Optional[Text] = None,
) -> Dict[Text, Dict[Text, List[int]]]:
if label_attribute in sparse_feature_sizes:
del sparse_feature_sizes[label_attribute]
return sparse_feature_sizes
    def _add_label_features(
        self,
        model_data: RasaModelData,
        training_data: List[Message],
        label_attribute: Text,
        label_id_dict: Dict[Text, int],
        training: bool = True,
    ) -> None:
        """Adds label ids and label features to `model_data` (training only)."""
        label_ids = []
        if training and self.component_config[INTENT_CLASSIFICATION]:
            for example in training_data:
                if example.get(label_attribute):
                    label_ids.append(label_id_dict[example.get(label_attribute)])
            # explicitly add last dimension to label_ids
            # to track correctly dynamic sequences
            model_data.add_features(
                LABEL_KEY,
                LABEL_SUB_KEY,
                [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
            )
        if (
            label_attribute
            and model_data.does_feature_not_exist(label_attribute, SENTENCE)
            and model_data.does_feature_not_exist(label_attribute, SEQUENCE)
        ):
            # no label features are present, get default features from _label_data
            model_data.add_features(
                LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))
            )
        # as label_attribute can have different values, e.g. INTENT or RESPONSE,
        # copy over the features to the LABEL key to make
        # it easier to access the label features inside the model itself
        model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)
        model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)
        model_data.update_key(label_attribute, MASK, LABEL, MASK)
        model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
# train helpers
    def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
        """Prepares data for training.

        Performs sanity checks on training data, extracts encodings for labels.
        Returns an empty RasaModelData when there are no labels to train on.
        """
        if self.component_config[BILOU_FLAG]:
            # NOTE: mutates `training_data` in place (adds BILOU tags).
            bilou_utils.apply_bilou_schema(training_data)
        label_id_index_mapping = self._label_id_index_mapping(
            training_data, attribute=INTENT
        )
        if not label_id_index_mapping:
            # no labels are present to train
            return RasaModelData()
        self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
        self._label_data = self._create_label_data(
            training_data, label_id_index_mapping, attribute=INTENT
        )
        self._entity_tag_specs = self._create_entity_tag_specs(training_data)
        label_attribute = (
            INTENT if self.component_config[INTENT_CLASSIFICATION] else None
        )
        model_data = self._create_model_data(
            training_data.nlu_examples,
            label_id_index_mapping,
            label_attribute=label_attribute,
        )
        self._check_input_dimension_consistency(model_data)
        return model_data
@staticmethod
def _check_enough_labels(model_data: RasaModelData) -> bool:
return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2
    def train(self, training_data: TrainingData) -> Resource:
        """Train the embedding intent classifier on a data set.

        Args:
            training_data: The NLU training data.

        Returns:
            The resource under which the trained model is persisted.
        """
        model_data = self.preprocess_train_data(training_data)
        if model_data.is_empty():
            logger.debug(
                f"Cannot train '{self.__class__.__name__}'. No data was provided. "
                f"Skipping training of the classifier."
            )
            return self._resource
        # Finetuning requires a previously loaded model to continue from.
        if not self.model and self.finetune_mode:
            raise rasa.shared.exceptions.InvalidParameterException(
                f"{self.__class__.__name__} was instantiated "
                f"with `model=None` and `finetune_mode=True`. "
                f"This is not a valid combination as the component "
                f"needs an already instantiated and trained model "
                f"to continue training in finetune mode."
            )
        if self.component_config.get(INTENT_CLASSIFICATION):
            if not self._check_enough_labels(model_data):
                logger.error(
                    f"Cannot train '{self.__class__.__name__}'. "
                    f"Need at least 2 different intent classes. "
                    f"Skipping training of classifier."
                )
                return self._resource
        if self.component_config.get(ENTITY_RECOGNITION):
            self.check_correct_entity_annotations(training_data)
        # keep one example for persisting and loading
        self._data_example = model_data.first_data_example()
        if not self.finetune_mode:
            # No pre-trained model to load from. Create a new instance of the model.
            self.model = self._instantiate_model_class(model_data)
            self.model.compile(
                optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])
            )
        else:
            # Finetuning: adapt the loaded model to any new sparse feature sizes.
            self.model.adjust_for_incremental_training(
                data_example=self._data_example,
                new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),
                old_sparse_feature_sizes=self._sparse_feature_sizes,
            )
        self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
        data_generator, validation_data_generator = train_utils.create_data_generators(
            model_data,
            self.component_config[BATCH_SIZES],
            self.component_config[EPOCHS],
            self.component_config[BATCH_STRATEGY],
            self.component_config[EVAL_NUM_EXAMPLES],
            self.component_config[RANDOM_SEED],
        )
        callbacks = train_utils.create_common_callbacks(
            self.component_config[EPOCHS],
            self.component_config[TENSORBOARD_LOG_DIR],
            self.component_config[TENSORBOARD_LOG_LEVEL],
            self.tmp_checkpoint_dir,
        )
        self.model.fit(
            data_generator,
            epochs=self.component_config[EPOCHS],
            validation_data=validation_data_generator,
            validation_freq=self.component_config[EVAL_NUM_EPOCHS],
            callbacks=callbacks,
            verbose=False,
            shuffle=False,  # we use custom shuffle inside data generator
        )
        self.persist()
        return self._resource
# process helpers
def _predict(
    self, message: Message
) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:
    """Runs the trained model on a single message.

    Returns the raw model output, or `None` when there is no trained model
    or the message produced no usable features.
    """
    if self.model is None:
        logger.debug(
            f"There is no trained model for '{self.__class__.__name__}': The "
            f"component is either not trained or didn't receive enough training "
            f"data."
        )
        return None

    # Wrap the single message into a batch of size one.
    batch_data = self._create_model_data([message], training=False)
    if batch_data.is_empty():
        return None

    return self.model.run_inference(batch_data)
def _predict_label(
    self, predict_out: Optional[Dict[Text, tf.Tensor]]
) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:
    """Predicts the intent of the provided message."""
    default_label: Dict[Text, Any] = {"name": None, "confidence": 0.0}

    if predict_out is None:
        return default_label, []

    # similarity scores come back as a matrix; collapse to a flat vector
    similarities = predict_out["i_scores"].flatten()
    if similarities.size == 0:
        # no scores at all, so no label can be predicted
        return default_label, []

    # rank the confidences, optionally renormalizing the retained top-k
    renormalize = (
        self.component_config[RENORMALIZE_CONFIDENCES]
        and self.component_config[MODEL_CONFIDENCE] == SOFTMAX
    )
    ranked_indices, similarities = train_utils.rank_and_mask(
        similarities,
        ranking_length=self.component_config[RANKING_LENGTH],
        renormalize=renormalize,
    )

    confidences: List[float] = similarities.tolist()  # np.float to float

    best_idx = ranked_indices[0]
    label = {
        "name": self.index_label_id_mapping[best_idx],
        "confidence": confidences[best_idx],
    }
    label_ranking = [
        {
            "name": self.index_label_id_mapping[idx],
            "confidence": confidences[idx],
        }
        for idx in ranked_indices
    ]
    return label, label_ranking
def _predict_entities(
    self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message
) -> List[Dict]:
    """Turns raw model output into entity dicts, merged with existing ones."""
    if predict_out is None:
        return []

    tags, confidences = train_utils.entity_label_to_tags(
        predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]
    )

    new_entities = self.convert_predictions_into_entities(
        message.get(TEXT),
        message.get(TOKENS_NAMES[TEXT], []),
        tags,
        self.split_entities_config,
        confidences,
    )
    new_entities = self.add_extractor_name(new_entities)

    # keep whatever entities other components already attached to the message
    return message.get(ENTITIES, []) + new_entities
def process(self, messages: List[Message]) -> List[Message]:
    """Augments the message with intents, entities, and diagnostic data."""
    for message in messages:
        prediction = self._predict(message)

        if self.component_config[INTENT_CLASSIFICATION]:
            label, ranking = self._predict_label(prediction)
            message.set(INTENT, label, add_to_output=True)
            message.set("intent_ranking", ranking, add_to_output=True)

        if self.component_config[ENTITY_RECOGNITION]:
            entities = self._predict_entities(prediction, message)
            message.set(ENTITIES, entities, add_to_output=True)

        if prediction and self._execution_context.should_add_diagnostic_data:
            message.add_diagnostic_data(
                self._execution_context.node_name, prediction.get(DIAGNOSTIC_DATA)
            )

    return messages
def persist(self) -> None:
    """Persist this model into the passed directory.

    Writes the TF model plus all side artifacts (data example, sparse
    feature sizes, label data, label-id mapping, entity tag specs) needed
    by `_load_from_files` to restore the component.
    """
    if self.model is None:
        # nothing was trained, so there is nothing to persist
        return None

    with self._model_storage.write_to(self._resource) as model_path:
        file_name = self.__class__.__name__
        tf_model_file = model_path / f"{file_name}.tf_model"

        rasa.shared.utils.io.create_directory_for_file(tf_model_file)

        if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:
            # restore the checkpointed weights before saving the model
            self.model.load_weights(self.tmp_checkpoint_dir / "checkpoint.tf_model")
            # Save an empty file to flag that this model has been
            # produced using checkpointing
            checkpoint_marker = model_path / f"{file_name}.from_checkpoint.pkl"
            checkpoint_marker.touch()

        self.model.save(str(tf_model_file))

        io_utils.pickle_dump(
            model_path / f"{file_name}.data_example.pkl", self._data_example
        )
        io_utils.pickle_dump(
            model_path / f"{file_name}.sparse_feature_sizes.pkl",
            self._sparse_feature_sizes,
        )
        io_utils.pickle_dump(
            model_path / f"{file_name}.label_data.pkl", dict(self._label_data.data)
        )
        io_utils.json_pickle(
            model_path / f"{file_name}.index_label_id_mapping.json",
            self.index_label_id_mapping,
        )

        # tag specs are NamedTuples; serialize them as plain dicts
        entity_tag_specs = (
            [tag_spec._asdict() for tag_spec in self._entity_tag_specs]
            if self._entity_tag_specs
            else []
        )
        rasa.shared.utils.io.dump_obj_as_json_to_file(
            model_path / f"{file_name}.entity_tag_specs.json", entity_tag_specs
        )
@classmethod
def load(
    cls,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    **kwargs: Any,
) -> DIETClassifier:
    """Loads a policy from the storage (see parent class for full docstring).

    Falls back to an untrained instance when the resource does not exist
    in the model storage.
    """
    try:
        with model_storage.read_from(resource) as model_path:
            return cls._load(
                model_path, config, model_storage, resource, execution_context
            )
    except ValueError:
        # Bug fix: `cls` is already a class, so `cls.__class__.__name__`
        # resolved to the metaclass name ("type") instead of the component
        # name; `cls.__name__` logs the actual class.
        logger.debug(
            f"Failed to load {cls.__name__} from model storage. Resource "
            f"'{resource.name}' doesn't exist."
        )
        return cls(config, model_storage, resource, execution_context)
@classmethod
def _load(
    cls,
    model_path: Path,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
) -> "DIETClassifier":
    """Loads the trained model from the provided directory."""
    # restore everything `persist` wrote to disk
    (
        index_label_id_mapping,
        entity_tag_specs,
        label_data,
        data_example,
        sparse_feature_sizes,
    ) = cls._load_from_files(model_path)

    # normalize confidence/similarity settings via the train_utils helpers
    config = train_utils.update_confidence_type(config)
    config = train_utils.update_similarity_type(config)

    model = cls._load_model(
        entity_tag_specs,
        label_data,
        config,
        data_example,
        model_path,
        # continue training from the loaded weights when finetuning
        finetune_mode=execution_context.is_finetuning,
    )

    return cls(
        config=config,
        model_storage=model_storage,
        resource=resource,
        execution_context=execution_context,
        index_label_id_mapping=index_label_id_mapping,
        entity_tag_specs=entity_tag_specs,
        model=model,
        sparse_feature_sizes=sparse_feature_sizes,
    )
@classmethod
def _load_from_files(
    cls, model_path: Path
) -> Tuple[
    Dict[int, Text],
    List[EntityTagSpec],
    RasaModelData,
    Dict[Text, Dict[Text, List[FeatureArray]]],
    Dict[Text, Dict[Text, List[int]]],
]:
    """Reads back all side artifacts written by `persist` from `model_path`."""
    file_name = cls.__name__

    data_example = io_utils.pickle_load(
        model_path / f"{file_name}.data_example.pkl"
    )
    label_data = io_utils.pickle_load(model_path / f"{file_name}.label_data.pkl")
    label_data = RasaModelData(data=label_data)
    sparse_feature_sizes = io_utils.pickle_load(
        model_path / f"{file_name}.sparse_feature_sizes.pkl"
    )
    index_label_id_mapping = io_utils.json_unpickle(
        model_path / f"{file_name}.index_label_id_mapping.json"
    )
    entity_tag_specs = rasa.shared.utils.io.read_json_file(
        model_path / f"{file_name}.entity_tag_specs.json"
    )
    # rebuild EntityTagSpec objects; JSON serialization turned the integer
    # ids in the mappings into strings, so cast them back
    entity_tag_specs = [
        EntityTagSpec(
            tag_name=tag_spec["tag_name"],
            ids_to_tags={
                int(key): value for key, value in tag_spec["ids_to_tags"].items()
            },
            tags_to_ids={
                key: int(value) for key, value in tag_spec["tags_to_ids"].items()
            },
            num_tags=tag_spec["num_tags"],
        )
        for tag_spec in entity_tag_specs
    ]

    # jsonpickle converts dictionary keys to strings
    index_label_id_mapping = {
        int(key): value for key, value in index_label_id_mapping.items()
    }

    return (
        index_label_id_mapping,
        entity_tag_specs,
        label_data,
        data_example,
        sparse_feature_sizes,
    )
@classmethod
def _load_model(
    cls,
    entity_tag_specs: List[EntityTagSpec],
    label_data: RasaModelData,
    config: Dict[Text, Any],
    data_example: Dict[Text, Dict[Text, List[FeatureArray]]],
    model_path: Path,
    finetune_mode: bool = False,
) -> "RasaModel":
    """Restores the TensorFlow model from the persisted artifacts."""
    tf_model_file = model_path / f"{cls.__name__}.tf_model"

    # Labels are only part of the model data when intent classification
    # is enabled.
    if config[INTENT_CLASSIFICATION]:
        label_key, label_sub_key = LABEL_KEY, LABEL_SUB_KEY
    else:
        label_key, label_sub_key = None, None

    model_data_example = RasaModelData(
        label_key=label_key, label_sub_key=label_sub_key, data=data_example
    )

    return cls._load_model_class(
        tf_model_file,
        model_data_example,
        label_data,
        entity_tag_specs,
        config,
        finetune_mode=finetune_mode,
    )
@classmethod
def _load_model_class(
    cls,
    tf_model_file: Text,
    model_data_example: RasaModelData,
    label_data: RasaModelData,
    entity_tag_specs: List[EntityTagSpec],
    config: Dict[Text, Any],
    finetune_mode: bool,
) -> "RasaModel":
    """Instantiates the concrete model class and loads its saved weights."""
    # At prediction time only text features are fed to the model.
    text_only_data = {
        key: value for key, value in model_data_example.items() if TEXT in key
    }
    predict_data_example = RasaModelData(
        label_key=model_data_example.label_key, data=text_only_data
    )

    return cls.model_class().load(
        tf_model_file,
        model_data_example,
        predict_data_example,
        data_signature=model_data_example.get_signature(),
        label_data=label_data,
        entity_tag_specs=entity_tag_specs,
        # deep-copy so the loaded model cannot mutate the caller's config
        config=copy.deepcopy(config),
        finetune_mode=finetune_mode,
    )
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
    """Creates a fresh, untrained instance of the configured model class."""
    model_cls = self.model_class()
    return model_cls(
        data_signature=model_data.get_signature(),
        label_data=self._label_data,
        entity_tag_specs=self._entity_tag_specs,
        config=self.component_config,
    )
class DIET(TransformerRasaModel):
def __init__(
    self,
    data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
    label_data: RasaModelData,
    entity_tag_specs: Optional[List[EntityTagSpec]],
    config: Dict[Text, Any],
) -> None:
    """Initializes the DIET model.

    Args:
        data_signature: Signature of the features in the training data.
        label_data: Featurized representation of every label.
        entity_tag_specs: CRF tag specifications for entity recognition.
        config: Model configuration.
    """
    # create entity tag spec before calling super otherwise building the model
    # will fail
    # NOTE(review): the comment above predates this ordering — `super().__init__`
    # actually runs first here; confirm against `TransformerRasaModel`.
    super().__init__("DIET", config, data_signature, label_data)
    self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)

    # at prediction time only text features are fed to the model
    self.predict_data_signature = {
        feature_name: features
        for feature_name, features in data_signature.items()
        if TEXT in feature_name
    }

    # tf training
    self._create_metrics()
    self._update_metrics_to_log()

    # needed for efficient prediction
    self.all_labels_embed: Optional[tf.Tensor] = None

    self._prepare_layers()
@staticmethod
def _ordered_tag_specs(
    entity_tag_specs: Optional[List[EntityTagSpec]],
) -> List[EntityTagSpec]:
    """Ensure that order of entity tag specs matches CRF layer order."""
    if entity_tag_specs is None:
        return []

    crf_order = [
        ENTITY_ATTRIBUTE_TYPE,
        ENTITY_ATTRIBUTE_ROLE,
        ENTITY_ATTRIBUTE_GROUP,
    ]
    # stable re-ordering: specs with the same tag name keep their relative order
    return [
        spec
        for tag_name in crf_order
        for spec in entity_tag_specs
        if spec.tag_name == tag_name
    ]
def _check_data(self) -> None:
    """Validates the data signature against the configured training tasks.

    Raises:
        InvalidConfigException: If text features are missing, or intent
            classification is enabled without label features.
        ValueError: If hidden layers are shared but text and label feature
            signatures differ.
    """
    if TEXT not in self.data_signature:
        raise InvalidConfigException(
            f"No text features specified. "
            f"Cannot train '{self.__class__.__name__}' model."
        )
    if self.config[INTENT_CLASSIFICATION]:
        if LABEL not in self.data_signature:
            raise InvalidConfigException(
                f"No label features specified. "
                f"Cannot train '{self.__class__.__name__}' model."
            )

        if self.config[SHARE_HIDDEN_LAYERS]:
            # shared weights require identical feature signatures for text
            # and label, checked per feature type (sentence / sequence)
            different_sentence_signatures = False
            different_sequence_signatures = False
            if (
                SENTENCE in self.data_signature[TEXT]
                and SENTENCE in self.data_signature[LABEL]
            ):
                different_sentence_signatures = (
                    self.data_signature[TEXT][SENTENCE]
                    != self.data_signature[LABEL][SENTENCE]
                )
            if (
                SEQUENCE in self.data_signature[TEXT]
                and SEQUENCE in self.data_signature[LABEL]
            ):
                different_sequence_signatures = (
                    self.data_signature[TEXT][SEQUENCE]
                    != self.data_signature[LABEL][SEQUENCE]
                )

            if different_sentence_signatures or different_sequence_signatures:
                raise ValueError(
                    "If hidden layer weights are shared, data signatures "
                    "for text_features and label_features must coincide."
                )

    if self.config[ENTITY_RECOGNITION] and (
        ENTITIES not in self.data_signature
        or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]
    ):
        # degrade gracefully instead of failing: disable entity recognition
        # when the training data carries no entity annotations
        logger.debug(
            f"You specified '{self.__class__.__name__}' to train entities, but "
            f"no entities are present in the training data. Skipping training of "
            f"entities."
        )
        self.config[ENTITY_RECOGNITION] = False
def _create_metrics(self) -> None:
    """Creates the Keras `Mean` metrics that accumulate per-epoch values."""
    # self.metrics will have the same order as they are created
    # so create loss metrics first to output losses first
    self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
    self.intent_loss = tf.keras.metrics.Mean(name="i_loss")
    self.entity_loss = tf.keras.metrics.Mean(name="e_loss")
    self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss")
    self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss")
    # create accuracy metrics second to output accuracies second
    self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
    self.intent_acc = tf.keras.metrics.Mean(name="i_acc")
    self.entity_f1 = tf.keras.metrics.Mean(name="e_f1")
    self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1")
    self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1")
def _update_metrics_to_log(self) -> None:
    """Selects which metrics appear in the training progress output.

    Losses are only added when the 'rasa' logger runs at DEBUG level;
    accuracies/F1 scores are always logged for the enabled tasks.
    """
    log_losses = logging.getLogger("rasa").level == logging.DEBUG

    if self.config[MASKED_LM]:
        self.metrics_to_log.append("m_acc")
        if log_losses:
            self.metrics_to_log.append("m_loss")

    if self.config[INTENT_CLASSIFICATION]:
        self.metrics_to_log.append("i_acc")
        if log_losses:
            self.metrics_to_log.append("i_loss")

    if self.config[ENTITY_RECOGNITION]:
        for tag_spec in self._entity_tag_specs:
            if tag_spec.num_tags != 0:
                prefix = tag_spec.tag_name[0]
                self.metrics_to_log.append(f"{prefix}_f1")
                if log_losses:
                    self.metrics_to_log.append(f"{prefix}_loss")

    self._log_metric_info()
def _log_metric_info(self) -> None:
    """Logs a human-readable explanation of every tracked metric name."""
    metric_name = {
        "t": "total",
        "i": "intent",
        "e": "entity",
        "m": "mask",
        "r": "role",
        "g": "group",
    }
    logger.debug("Following metrics will be logged during training: ")
    for metric in self.metrics_to_log:
        # metric names look like "i_acc": prefix encodes the task,
        # suffix the measured quantity
        prefix, _, suffix = metric.partition("_")
        logger.debug(f"  {metric} ({metric_name[prefix]} {suffix})")
def _prepare_layers(self) -> None:
    """Instantiates all TF layers used by the model, keyed by name in `_tf_layers`."""
    # For user text, prepare layers that combine different feature types, embed
    # everything using a transformer and optionally also do masked language
    # modeling.
    self.text_name = TEXT
    self._tf_layers[
        f"sequence_layer.{self.text_name}"
    ] = rasa_layers.RasaSequenceLayer(
        self.text_name, self.data_signature[self.text_name], self.config
    )
    if self.config[MASKED_LM]:
        self._prepare_mask_lm_loss(self.text_name)

    # Intent labels are treated similarly to user text but without the transformer,
    # without masked language modelling, and with no dropout applied to the
    # individual features, only to the overall label embedding after all label
    # features have been combined.
    if self.config[INTENT_CLASSIFICATION]:
        self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL

        # disable input dropout applied to sparse and dense label features
        label_config = self.config.copy()
        label_config.update(
            {SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}
        )

        self._tf_layers[
            f"feature_combining_layer.{self.label_name}"
        ] = rasa_layers.RasaFeatureCombiningLayer(
            self.label_name, self.label_signature[self.label_name], label_config
        )

        self._prepare_ffnn_layer(
            self.label_name,
            self.config[HIDDEN_LAYERS_SIZES][self.label_name],
            self.config[DROP_RATE],
        )

        self._prepare_label_classification_layers(predictor_attribute=TEXT)

    if self.config[ENTITY_RECOGNITION]:
        self._prepare_entity_recognition_layers()
def _prepare_mask_lm_loss(self, name: Text) -> None:
    """Creates the embedding and loss layers for masked language modeling."""
    # for embedding predicted tokens at masked positions
    self._prepare_embed_layers(f"{name}_lm_mask")

    # for embedding the true tokens that got masked
    self._prepare_embed_layers(f"{name}_golden_token")

    # mask loss is additional loss
    # set scaling to False, so that it doesn't overpower other losses
    self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False)
def _create_bow(
    self,
    sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
    sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
    sequence_feature_lengths: tf.Tensor,
    name: Text,
) -> tf.Tensor:
    """Combines features into one bag-of-words vector per input example."""
    x, _ = self._tf_layers[f"feature_combining_layer.{name}"](
        (sequence_features, sentence_features, sequence_feature_lengths),
        training=self._training,
    )

    # convert to bag-of-words by summing along the sequence dimension
    x = tf.reduce_sum(x, axis=1)

    return self._tf_layers[f"ffnn.{name}"](x, self._training)
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
    """Embeds every known label.

    Returns:
        A tuple of (all label ids, the corresponding label embeddings).
    """
    all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]

    sequence_feature_lengths = self._get_sequence_feature_lengths(
        self.tf_label_data, LABEL
    )

    # labels are embedded via a bag-of-words representation (no transformer)
    x = self._create_bow(
        self.tf_label_data[LABEL][SEQUENCE],
        self.tf_label_data[LABEL][SENTENCE],
        sequence_feature_lengths,
        self.label_name,
    )
    all_labels_embed = self._tf_layers[f"embed.{LABEL}"](x)

    return all_label_ids, all_labels_embed
def _mask_loss(
    self,
    outputs: tf.Tensor,
    inputs: tf.Tensor,
    seq_ids: tf.Tensor,
    mlm_mask_boolean: tf.Tensor,
    name: Text,
) -> tf.Tensor:
    """Computes the masked-language-modeling loss over the masked positions."""
    # make sure there is at least one element in the mask
    mlm_mask_boolean = tf.cond(
        tf.reduce_any(mlm_mask_boolean),
        lambda: mlm_mask_boolean,
        lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),
    )

    mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)

    # Pick elements that were masked, throwing away the batch & sequence dimension
    # and effectively switching from shape (batch_size, sequence_length, units) to
    # (num_masked_elements, units).
    outputs = tf.boolean_mask(outputs, mlm_mask_boolean)
    inputs = tf.boolean_mask(inputs, mlm_mask_boolean)
    ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)

    tokens_predicted_embed = self._tf_layers[f"embed.{name}_lm_mask"](outputs)
    tokens_true_embed = self._tf_layers[f"embed.{name}_golden_token"](inputs)

    # To limit the otherwise computationally expensive loss calculation, we
    # constrain the label space in MLM (i.e. token space) to only those tokens that
    # were masked in this batch. Hence the reduced list of token embeddings
    # (tokens_true_embed) and the reduced list of labels (ids) are passed as
    # all_labels_embed and all_labels, respectively. In the future, we could be less
    # restrictive and construct a slightly bigger label space which could include
    # tokens not masked in the current batch too.
    return self._tf_layers[f"loss.{name}_mask"](
        inputs_embed=tokens_predicted_embed,
        labels_embed=tokens_true_embed,
        labels=ids,
        all_labels_embed=tokens_true_embed,
        all_labels=ids,
    )
def _calculate_label_loss(
    self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor
) -> tf.Tensor:
    """Computes the dot-product loss between text and label embeddings."""
    # negative sampling happens against the embeddings of all known labels
    all_label_ids, all_labels_embed = self._create_all_labels()

    text_embed = self._tf_layers[f"embed.{TEXT}"](text_features)
    label_embed = self._tf_layers[f"embed.{LABEL}"](label_features)

    return self._tf_layers[f"loss.{LABEL}"](
        text_embed, label_embed, label_ids, all_labels_embed, all_label_ids
    )
# MASKED: batch_loss function (lines 1559-1628)
def _batch_loss_intent(
    self,
    combined_sequence_sentence_feature_lengths_text: tf.Tensor,
    text_transformed: tf.Tensor,
    tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> tf.Tensor:
    """Computes the intent-classification loss for one batch."""
    # get sentence features vector for intent classification
    sentence_vector = self._last_token(
        text_transformed, combined_sequence_sentence_feature_lengths_text
    )

    sequence_feature_lengths_label = self._get_sequence_feature_lengths(
        tf_batch_data, LABEL
    )

    label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
    # labels are represented as a bag of words (see `_create_bow`)
    label = self._create_bow(
        tf_batch_data[LABEL][SEQUENCE],
        tf_batch_data[LABEL][SENTENCE],
        sequence_feature_lengths_label,
        self.label_name,
    )
    loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)

    self._update_label_metrics(loss, acc)

    return loss
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
    """Records the intent loss and accuracy of the current batch."""
    self.intent_loss.update_state(loss)
    self.intent_acc.update_state(acc)
def _batch_loss_entities(
    self,
    mask_combined_sequence_sentence: tf.Tensor,
    sequence_feature_lengths: tf.Tensor,
    text_transformed: tf.Tensor,
    tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> List[tf.Tensor]:
    """Computes one CRF loss per trained entity tag type (type/role/group).

    The tag specs are iterated in CRF order (see `_ordered_tag_specs`), so
    the entity-type tags computed first can feed the role and group CRFs.
    """
    losses = []

    entity_tags = None

    for tag_spec in self._entity_tag_specs:
        if tag_spec.num_tags == 0:
            # this tag type was not present in the training data
            continue

        tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]
        # add a zero (no entity) for the sentence features to match the shape of
        # inputs
        tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])

        loss, f1, _logits = self._calculate_entity_loss(
            text_transformed,
            tag_ids,
            mask_combined_sequence_sentence,
            sequence_feature_lengths,
            tag_spec.tag_name,
            entity_tags,
        )

        if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:
            # use the entity tags as additional input for the role
            # and group CRF
            entity_tags = tf.one_hot(
                tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags
            )

        self._update_entity_metrics(loss, f1, tag_spec.tag_name)

        losses.append(loss)

    return losses
def _update_entity_metrics(
    self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text
) -> None:
    """Routes the entity loss/F1 of a batch to the metrics for `tag_name`."""
    metrics_by_tag = {
        ENTITY_ATTRIBUTE_TYPE: (self.entity_loss, self.entity_f1),
        ENTITY_ATTRIBUTE_GROUP: (self.entity_group_loss, self.entity_group_f1),
        ENTITY_ATTRIBUTE_ROLE: (self.entity_role_loss, self.entity_role_f1),
    }
    if tag_name in metrics_by_tag:
        loss_metric, f1_metric = metrics_by_tag[tag_name]
        loss_metric.update_state(loss)
        f1_metric.update_state(f1)
def prepare_for_predict(self) -> None:
    """Prepares the model for prediction."""
    if self.config[INTENT_CLASSIFICATION]:
        # pre-compute the embeddings of all labels once, so that
        # `batch_predict` only needs to embed the incoming text
        _, self.all_labels_embed = self._create_all_labels()
def batch_predict(
    self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> Dict[Text, tf.Tensor]:
    """Predicts the output of the given batch.

    Args:
        batch_in: The batch.

    Returns:
        The output to predict.
    """
    tf_batch_data = self.batch_to_model_data_format(
        batch_in, self.predict_data_signature
    )

    sequence_feature_lengths = self._get_sequence_feature_lengths(
        tf_batch_data, TEXT
    )
    sentence_feature_lengths = self._get_sentence_feature_lengths(
        tf_batch_data, TEXT
    )

    # run the shared sequence layer (feature combining + transformer)
    text_transformed, _, _, _, _, attention_weights = self._tf_layers[
        f"sequence_layer.{self.text_name}"
    ](
        (
            tf_batch_data[TEXT][SEQUENCE],
            tf_batch_data[TEXT][SENTENCE],
            sequence_feature_lengths,
        ),
        training=self._training,
    )

    # diagnostic data is always returned alongside the task predictions
    predictions = {
        DIAGNOSTIC_DATA: {
            "attention_weights": attention_weights,
            "text_transformed": text_transformed,
        }
    }

    if self.config[INTENT_CLASSIFICATION]:
        predictions.update(
            self._batch_predict_intents(
                sequence_feature_lengths + sentence_feature_lengths,
                text_transformed,
            )
        )

    if self.config[ENTITY_RECOGNITION]:
        predictions.update(
            self._batch_predict_entities(sequence_feature_lengths, text_transformed)
        )

    return predictions
def _batch_predict_entities(
    self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor
) -> Dict[Text, tf.Tensor]:
    """Runs the CRF layers to predict entity tag ids and confidences.

    Tag specs are iterated in CRF order, so the predicted entity-type tags
    can feed the role and group CRFs as an additional input.
    """
    predictions: Dict[Text, tf.Tensor] = {}

    entity_tags = None

    for tag_spec in self._entity_tag_specs:
        # skip crf layer if it was not trained
        if tag_spec.num_tags == 0:
            continue

        name = tag_spec.tag_name
        _input = text_transformed

        if entity_tags is not None:
            _tags = self._tf_layers[f"embed.{name}.tags"](entity_tags)
            _input = tf.concat([_input, _tags], axis=-1)

        _logits = self._tf_layers[f"embed.{name}.logits"](_input)
        pred_ids, confidences = self._tf_layers[f"crf.{name}"](
            _logits, sequence_feature_lengths
        )

        predictions[f"e_{name}_ids"] = pred_ids
        predictions[f"e_{name}_scores"] = confidences

        if name == ENTITY_ATTRIBUTE_TYPE:
            # use the entity tags as additional input for the role
            # and group CRF
            entity_tags = tf.one_hot(
                tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags
            )

    return predictions
def _batch_predict_intents(
    self,
    combined_sequence_sentence_feature_lengths: tf.Tensor,
    text_transformed: tf.Tensor,
) -> Dict[Text, tf.Tensor]:
    """Scores the batch's text embeddings against all label embeddings.

    Raises:
        ValueError: If `prepare_for_predict` was not called first to
            pre-compute `self.all_labels_embed`.
    """
    if self.all_labels_embed is None:
        raise ValueError(
            "The model was not prepared for prediction. "
            "Call `prepare_for_predict` first."
        )

    # get sentence feature vector for intent classification
    sentence_vector = self._last_token(
        text_transformed, combined_sequence_sentence_feature_lengths
    )
    sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)

    _, scores = self._tf_layers[
        f"loss.{LABEL}"
    ].get_similarities_and_confidences_from_embeddings(
        sentence_vector_embed[:, tf.newaxis, :],
        self.all_labels_embed[tf.newaxis, :, :],
    )

    return {"i_scores": scores}
|
def batch_loss(
    self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> tf.Tensor:
    """Calculates the loss for the given batch.

    Args:
        batch_in: The batch.

    Returns:
        The loss of the given batch.
    """
    tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)

    sequence_feature_lengths = self._get_sequence_feature_lengths(
        tf_batch_data, TEXT
    )

    # run the shared sequence layer; it also returns the inputs needed for
    # the optional masked-language-modeling loss
    (
        text_transformed,
        text_in,
        mask_combined_sequence_sentence,
        text_seq_ids,
        mlm_mask_boolean_text,
        _,
    ) = self._tf_layers[f"sequence_layer.{self.text_name}"](
        (
            tf_batch_data[TEXT][SEQUENCE],
            tf_batch_data[TEXT][SENTENCE],
            sequence_feature_lengths,
        ),
        training=self._training,
    )

    losses = []

    # Lengths of sequences in case of sentence-level features are always 1, but they
    # can effectively be 0 if sentence-level features aren't present.
    sentence_feature_lengths = self._get_sentence_feature_lengths(
        tf_batch_data, TEXT
    )
    combined_sequence_sentence_feature_lengths = (
        sequence_feature_lengths + sentence_feature_lengths
    )

    if self.config[MASKED_LM]:
        loss, acc = self._mask_loss(
            text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT
        )
        self.mask_loss.update_state(loss)
        self.mask_acc.update_state(acc)
        losses.append(loss)

    if self.config[INTENT_CLASSIFICATION]:
        loss = self._batch_loss_intent(
            combined_sequence_sentence_feature_lengths,
            text_transformed,
            tf_batch_data,
        )
        losses.append(loss)

    if self.config[ENTITY_RECOGNITION]:
        losses += self._batch_loss_entities(
            mask_combined_sequence_sentence,
            sequence_feature_lengths,
            text_transformed,
            tf_batch_data,
        )

    # total batch loss is the sum of all enabled task losses
    return tf.math.add_n(losses)
| 1,559
| 1,628
|
from __future__ import annotations
import copy
import logging
from collections import defaultdict
from pathlib import Path
from rasa.nlu.featurizers.featurizer import Featurizer
import numpy as np
import scipy.sparse
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.nlu.classifiers.classifier import IntentClassifier
import rasa.shared.utils.io
import rasa.utils.io as io_utils
import rasa.nlu.utils.bilou_utils as bilou_utils
from rasa.shared.constants import DIAGNOSTIC_DATA
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.utils import train_utils
from rasa.utils.tensorflow import rasa_layers
from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
from rasa.utils.tensorflow.model_data import (
RasaModelData,
FeatureSignature,
FeatureArray,
)
from rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
TEXT,
INTENT,
INTENT_RESPONSE_KEY,
ENTITIES,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
NO_ENTITY_TAG,
SPLIT_ENTITIES_BY_COMMA,
)
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.tensorflow.constants import (
LABEL,
IDS,
HIDDEN_LAYERS_SIZES,
RENORMALIZE_CONFIDENCES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
CONNECTION_DENSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
AUTO,
BALANCED,
CROSS_ENTROPY,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
CHECKPOINT_MODEL,
SEQUENCE,
SENTENCE,
SEQUENCE_LENGTH,
DENSE_DIMENSION,
MASK,
CONSTRAIN_SIMILARITIES,
MODEL_CONFIDENCE,
SOFTMAX,
)
logger = logging.getLogger(__name__)

# feature type identifiers used in model data keys
SPARSE = "sparse"
DENSE = "dense"

# keys under which the label ids are stored in the model data
LABEL_KEY = LABEL
LABEL_SUB_KEY = IDS

# entity tag types the CRF layers may be trained on
POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
@DefaultV1Recipe.register(
[
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
],
is_trainable=True,
)
class DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):
"""A multi-task model for intent classification and entity extraction.
DIET is Dual Intent and Entity Transformer.
The architecture is based on a transformer which is shared for both tasks.
A sequence of entity labels is predicted through a Conditional Random Field (CRF)
tagging layer on top of the transformer output sequence corresponding to the
input sequence of tokens. The transformer output for the ``__CLS__`` token and
intent labels are embedded into a single semantic vector space. We use the
dot-product loss to maximize the similarity with the target label and minimize
similarities with negative samples.
"""
@classmethod
def required_components(cls) -> List[Type]:
    """Components that should be included in the pipeline before this component."""
    # DIET consumes message features, so at least one featurizer must run
    # earlier in the pipeline.
    return [Featurizer]
@staticmethod
def get_default_config() -> Dict[Text, Any]:
    """The component's default config (see parent class for full docstring)."""
    # please make sure to update the docs when changing a default parameter
    return {
        # ## Architecture of the used neural network
        # Hidden layer sizes for layers before the embedding layers for user message
        # and labels.
        # The number of hidden layers is equal to the length of the corresponding
        # list.
        HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},
        # Whether to share the hidden layer weights between user message and labels.
        SHARE_HIDDEN_LAYERS: False,
        # Number of units in transformer
        TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,
        # Number of transformer layers
        NUM_TRANSFORMER_LAYERS: 2,
        # Number of attention heads in transformer
        NUM_HEADS: 4,
        # If 'True' use key relative embeddings in attention
        KEY_RELATIVE_ATTENTION: False,
        # If 'True' use value relative embeddings in attention
        VALUE_RELATIVE_ATTENTION: False,
        # Max position for relative embeddings. Only in effect if key- or value
        # relative attention are turned on
        MAX_RELATIVE_POSITION: 5,
        # Use a unidirectional or bidirectional encoder.
        UNIDIRECTIONAL_ENCODER: False,
        # ## Training parameters
        # Initial and final batch sizes:
        # Batch size will be linearly increased for each epoch.
        BATCH_SIZES: [64, 256],
        # Strategy used when creating batches.
        # Can be either 'sequence' or 'balanced'.
        BATCH_STRATEGY: BALANCED,
        # Number of epochs to train
        EPOCHS: 300,
        # Set random seed to any 'int' to get reproducible results
        RANDOM_SEED: None,
        # Initial learning rate for the optimizer
        LEARNING_RATE: 0.001,
        # ## Parameters for embeddings
        # Dimension size of embedding vectors
        EMBEDDING_DIMENSION: 20,
        # Dense dimension to use for sparse features.
        DENSE_DIMENSION: {TEXT: 128, LABEL: 20},
        # Default dimension to use for concatenating sequence and sentence features.
        CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},
        # The number of incorrect labels. The algorithm will minimize
        # their similarity to the user input during training.
        NUM_NEG: 20,
        # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
        SIMILARITY_TYPE: AUTO,
        # The type of the loss function, either 'cross_entropy' or 'margin'.
        LOSS_TYPE: CROSS_ENTROPY,
        # Number of top intents for which confidences should be reported.
        # Set to 0 if confidences for all intents should be reported.
        RANKING_LENGTH: LABEL_RANKING_LENGTH,
        # Indicates how similar the algorithm should try to make embedding vectors
        # for correct labels.
        # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
        MAX_POS_SIM: 0.8,
        # Maximum negative similarity for incorrect labels.
        # Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
        MAX_NEG_SIM: -0.4,
        # If 'True' the algorithm only minimizes maximum similarity over
        # incorrect intent labels, used only if 'loss_type' is set to 'margin'.
        USE_MAX_NEG_SIM: True,
        # If 'True' scale loss inverse proportionally to the confidence
        # of the correct prediction
        SCALE_LOSS: False,
        # ## Regularization parameters
        # The scale of regularization
        REGULARIZATION_CONSTANT: 0.002,
        # The scale of how important is to minimize the maximum similarity
        # between embeddings of different labels,
        # used only if 'loss_type' is set to 'margin'.
        NEGATIVE_MARGIN_SCALE: 0.8,
        # Dropout rate for encoder
        DROP_RATE: 0.2,
        # Dropout rate for attention
        DROP_RATE_ATTENTION: 0,
        # Fraction of trainable weights in internal layers.
        CONNECTION_DENSITY: 0.2,
        # If 'True' apply dropout to sparse input tensors
        SPARSE_INPUT_DROPOUT: True,
        # If 'True' apply dropout to dense input tensors
        DENSE_INPUT_DROPOUT: True,
        # ## Evaluation parameters
        # How often calculate validation accuracy.
        # Small values may hurt performance.
        EVAL_NUM_EPOCHS: 20,
        # How many examples to use for hold out validation set
        # Large values may hurt performance, e.g. model accuracy.
        # Set to 0 for no validation.
        EVAL_NUM_EXAMPLES: 0,
        # ## Model config
        # If 'True' intent classification is trained and intent predicted.
        INTENT_CLASSIFICATION: True,
        # If 'True' named entity recognition is trained and entities predicted.
        ENTITY_RECOGNITION: True,
        # If 'True' random tokens of the input message will be masked and the model
        # should predict those tokens. Requires at least one transformer layer.
        MASKED_LM: False,
        # 'BILOU_flag' determines whether to use BILOU tagging or not.
        # If set to 'True' labelling is more rigorous, however more
        # examples per entity are required.
        # Rule of thumb: you should have more than 100 examples per entity.
        BILOU_FLAG: True,
        # If you want to use tensorboard to visualize training and validation
        # metrics, set this option to a valid output directory.
        TENSORBOARD_LOG_DIR: None,
        # Define when training metrics for tensorboard should be logged.
        # Either after every epoch or for every training step.
        # Valid values: 'epoch' and 'batch'
        TENSORBOARD_LOG_LEVEL: "epoch",
        # Perform model checkpointing
        CHECKPOINT_MODEL: False,
        # Specify what features to use as sequence and sentence features
        # By default all features in the pipeline are used.
        FEATURIZERS: [],
        # Split entities by comma, this makes sense e.g. for a list of ingredients
        # in a recipie, but it doesn't make sense for the parts of an address
        SPLIT_ENTITIES_BY_COMMA: True,
        # If 'True' applies sigmoid on all similarity terms and adds
        # it to the loss function to ensure that similarity values are
        # approximately bounded. Used inside cross-entropy loss only.
        CONSTRAIN_SIMILARITIES: False,
        # Model confidence to be returned during inference. Currently, the only
        # possible value is `softmax`.
        MODEL_CONFIDENCE: SOFTMAX,
        # Determines whether the confidences of the chosen top intents should be
        # renormalized so that they sum up to 1. By default, we do not renormalize
        # and return the confidences for the top intents as is.
        # Note that renormalization only makes sense if confidences are generated
        # via `softmax`.
        RENORMALIZE_CONFIDENCES: False,
    }
def __init__(
    self,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    index_label_id_mapping: Optional[Dict[int, Text]] = None,
    entity_tag_specs: Optional[List[EntityTagSpec]] = None,
    model: Optional[RasaModel] = None,
    sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,
) -> None:
    """Declare instance variables with default values.

    Args:
        config: Component configuration (see the class-level defaults).
        model_storage: Storage the trained model is persisted to / loaded from.
        resource: Resource identifying this component in the storage.
        execution_context: Graph execution context (supplies the finetuning flag).
        index_label_id_mapping: Maps label indices back to label names
            (populated when loading a trained component).
        entity_tag_specs: Entity tag specifications (populated on load/training).
        model: Already-instantiated model (only when loading/finetuning).
        sparse_feature_sizes: Sparse feature sizes per attribute from a
            previous training run (used for incremental training).
    """
    if EPOCHS not in config:
        rasa.shared.utils.io.raise_warning(
            f"Please configure the number of '{EPOCHS}' in your configuration file."
            f" We will change the default value of '{EPOCHS}' in the future to 1. "
        )
    self.component_config = config
    self._model_storage = model_storage
    self._resource = resource
    self._execution_context = execution_context
    # validates and normalizes self.component_config in place
    self._check_config_parameters()
    # transform numbers to labels
    self.index_label_id_mapping = index_label_id_mapping or {}
    self._entity_tag_specs = entity_tag_specs
    self.model = model
    # checkpointing writes intermediate weights to a temp dir; picked up in persist()
    self.tmp_checkpoint_dir: Optional[Path] = None
    if self.component_config[CHECKPOINT_MODEL]:
        self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
    self._label_data: Optional[RasaModelData] = None
    self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None
    self.split_entities_config = rasa.utils.train_utils.init_split_entities(
        self.component_config[SPLIT_ENTITIES_BY_COMMA],
        SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
    )
    self.finetune_mode = self._execution_context.is_finetuning
    self._sparse_feature_sizes = sparse_feature_sizes
# init helpers
def _check_masked_lm(self) -> None:
    """Reject masked language modeling when no transformer is configured."""
    uses_masked_lm = self.component_config[MASKED_LM]
    has_transformer_layers = self.component_config[NUM_TRANSFORMER_LAYERS] != 0
    if uses_masked_lm and not has_transformer_layers:
        raise ValueError(
            f"If number of transformer layers is 0, "
            f"'{MASKED_LM}' option should be 'False'."
        )
def _check_share_hidden_layers_sizes(self) -> None:
    """Verify that, when hidden layers are shared, all sizes agree."""
    if not self.component_config.get(SHARE_HIDDEN_LAYERS):
        return
    configured_sizes = self.component_config[HIDDEN_LAYERS_SIZES]
    reference_sizes = next(iter(configured_sizes.values()))
    # every attribute must use exactly the same layer sizes as the first one
    if any(sizes != reference_sizes for sizes in configured_sizes.values()):
        raise ValueError(
            f"If hidden layer weights are shared, "
            f"{HIDDEN_LAYERS_SIZES} must coincide."
        )
def _check_config_parameters(self) -> None:
    """Validate and normalize the component configuration in place.

    The order of the calls matters: deprecated options are migrated first,
    structural checks run next, and confidence/similarity/evaluation
    settings are reconciled afterwards.
    """
    self.component_config = train_utils.check_deprecated_options(
        self.component_config
    )
    self._check_masked_lm()
    self._check_share_hidden_layers_sizes()
    self.component_config = train_utils.update_confidence_type(
        self.component_config
    )
    train_utils.validate_configuration_settings(self.component_config)
    self.component_config = train_utils.update_similarity_type(
        self.component_config
    )
    self.component_config = train_utils.update_evaluation_parameters(
        self.component_config
    )
@classmethod
def create(
    cls,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
) -> DIETClassifier:
    """Creates a new untrained component (see parent class for full docstring)."""
    # No trained model yet, so all optional constructor args keep their defaults.
    return cls(config, model_storage, resource, execution_context)
@property
def label_key(self) -> Optional[Text]:
    """Key under which label ids live, or ``None`` without intent classification."""
    if self.component_config[INTENT_CLASSIFICATION]:
        return LABEL_KEY
    return None
@property
def label_sub_key(self) -> Optional[Text]:
    """Sub key for label ids, or ``None`` without intent classification."""
    if self.component_config[INTENT_CLASSIFICATION]:
        return LABEL_SUB_KEY
    return None
@staticmethod
def model_class() -> Type[RasaModel]:
    """Return the TensorFlow model class this component instantiates."""
    return DIET
# training data helpers:
@staticmethod
def _label_id_index_mapping(
training_data: TrainingData, attribute: Text
) -> Dict[Text, int]:
"""Create label_id dictionary."""
distinct_label_ids = {
example.get(attribute) for example in training_data.intent_examples
} - {None}
return {
label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))
}
@staticmethod
def _invert_mapping(mapping: Dict) -> Dict:
return {value: key for key, value in mapping.items()}
def _create_entity_tag_specs(
    self, training_data: TrainingData
) -> List[EntityTagSpec]:
    """Create entity tag specifications with their respective tag id mappings.

    One spec is created per tag kind in POSSIBLE_TAGS for which the training
    data actually contains tags; kinds with no tags are skipped entirely.
    """
    _tag_specs = []
    for tag_name in POSSIBLE_TAGS:
        if self.component_config[BILOU_FLAG]:
            # presumably yields BILOU-prefixed tag ids — see bilou_utils
            tag_id_index_mapping = bilou_utils.build_tag_id_dict(
                training_data, tag_name
            )
        else:
            tag_id_index_mapping = self._tag_id_index_mapping_for(
                tag_name, training_data
            )
        if tag_id_index_mapping:
            _tag_specs.append(
                EntityTagSpec(
                    tag_name=tag_name,
                    tags_to_ids=tag_id_index_mapping,
                    # inverse mapping is needed to turn predictions back into tags
                    ids_to_tags=self._invert_mapping(tag_id_index_mapping),
                    num_tags=len(tag_id_index_mapping),
                )
            )
    return _tag_specs
@staticmethod
def _tag_id_index_mapping_for(
    tag_name: Text, training_data: TrainingData
) -> Optional[Dict[Text, int]]:
    """Map each tag of the given kind to an id; ``None`` if no tags exist."""
    if tag_name == ENTITY_ATTRIBUTE_ROLE:
        candidate_tags = training_data.entity_roles
    elif tag_name == ENTITY_ATTRIBUTE_GROUP:
        candidate_tags = training_data.entity_groups
    else:
        candidate_tags = training_data.entities
    relevant_tags = candidate_tags - {NO_ENTITY_TAG, None}
    if not relevant_tags:
        return None
    # real tags start at index 1 ...
    mapping = {tag: index for index, tag in enumerate(sorted(relevant_tags), 1)}
    # ... because index 0 is reserved for NO_ENTITY_TAG (non-entity), which is
    # required for correct prediction for padding
    mapping[NO_ENTITY_TAG] = 0
    return mapping
@staticmethod
def _find_example_for_label(
label: Text, examples: List[Message], attribute: Text
) -> Optional[Message]:
for ex in examples:
if ex.get(attribute) == label:
return ex
return None
def _check_labels_features_exist(
    self, labels_example: List[Message], attribute: Text
) -> bool:
    """Return ``True`` iff every label example carries features for ``attribute``."""
    featurizers = self.component_config[FEATURIZERS]
    for label_example in labels_example:
        if not label_example.features_present(attribute, featurizers):
            return False
    return True
def _extract_features(
    self, message: Message, attribute: Text
) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:
    """Collect sparse/dense sequence and sentence features for one message.

    Returns:
        Dict keyed by "<sparse|dense>_<sequence|sentence>" containing only
        the feature kinds actually present on the message.

    Raises:
        ValueError: If sparse and dense features disagree on their first
            (sequence) dimension.
    """
    (
        sparse_sequence_features,
        sparse_sentence_features,
    ) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])
    dense_sequence_features, dense_sentence_features = message.get_dense_features(
        attribute, self.component_config[FEATURIZERS]
    )
    # NOTE(review): both error messages below lack a space before
    # "for attribute" — strings kept byte-identical here.
    if dense_sequence_features is not None and sparse_sequence_features is not None:
        if (
            dense_sequence_features.features.shape[0]
            != sparse_sequence_features.features.shape[0]
        ):
            raise ValueError(
                f"Sequence dimensions for sparse and dense sequence features "
                f"don't coincide in '{message.get(TEXT)}'"
                f"for attribute '{attribute}'."
            )
    if dense_sentence_features is not None and sparse_sentence_features is not None:
        if (
            dense_sentence_features.features.shape[0]
            != sparse_sentence_features.features.shape[0]
        ):
            raise ValueError(
                f"Sequence dimensions for sparse and dense sentence features "
                f"don't coincide in '{message.get(TEXT)}'"
                f"for attribute '{attribute}'."
            )
    # If we don't use the transformer and we don't want to do entity recognition,
    # to speed up training take only the sentence features as feature vector.
    # We would not make use of the sequence anyway in this setup. Carrying over
    # those features to the actual training process takes quite some time.
    if (
        self.component_config[NUM_TRANSFORMER_LAYERS] == 0
        and not self.component_config[ENTITY_RECOGNITION]
        and attribute not in [INTENT, INTENT_RESPONSE_KEY]
    ):
        sparse_sequence_features = None
        dense_sequence_features = None
    out = {}
    if sparse_sentence_features is not None:
        out[f"{SPARSE}_{SENTENCE}"] = sparse_sentence_features.features
    if sparse_sequence_features is not None:
        out[f"{SPARSE}_{SEQUENCE}"] = sparse_sequence_features.features
    if dense_sentence_features is not None:
        out[f"{DENSE}_{SENTENCE}"] = dense_sentence_features.features
    if dense_sequence_features is not None:
        out[f"{DENSE}_{SEQUENCE}"] = dense_sequence_features.features
    return out
def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
    """Checks if features have same dimensionality if hidden layers are shared."""
    if self.component_config.get(SHARE_HIDDEN_LAYERS):
        num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
        num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
        num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
        num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
        # Chained comparison: fires only when BOTH counts are > 0 AND they
        # differ (i.e. `0 < a and a != b and b > 0`). A count of 0 means
        # that feature kind is absent, so no mismatch can occur.
        if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (
            0 < num_text_sequence_features != num_label_sequence_features > 0
        ):
            raise ValueError(
                "If embeddings are shared text features and label features "
                "must coincide. Check the output dimensions of previous components."
            )
def _extract_labels_precomputed_features(
    self, label_examples: List[Message], attribute: Text = INTENT
) -> Tuple[List[FeatureArray], List[FeatureArray]]:
    """Collects precomputed encodings.

    Gathers per-example features and regroups them into sequence-level and
    sentence-level FeatureArrays (one per feature key, stacked over examples).
    """
    features = defaultdict(list)
    for e in label_examples:
        label_features = self._extract_features(e, attribute)
        for feature_key, feature_value in label_features.items():
            features[feature_key].append(feature_value)
    sequence_features = []
    sentence_features = []
    for feature_name, feature_value in features.items():
        # keys look like "<sparse|dense>_<sequence|sentence>" (see _extract_features)
        if SEQUENCE in feature_name:
            sequence_features.append(
                FeatureArray(np.array(feature_value), number_of_dimensions=3)
            )
        else:
            sentence_features.append(
                FeatureArray(np.array(feature_value), number_of_dimensions=3)
            )
    return sequence_features, sentence_features
@staticmethod
def _compute_default_label_features(
    labels_example: List[Message],
) -> List[FeatureArray]:
    """Computes one-hot representation for the labels."""
    logger.debug("No label features found. Computing default label features.")
    num_labels = len(labels_example)
    one_hot_rows = np.eye(num_labels, dtype=np.float32)
    # give every label a "sequence" of length one: shape (1, num_labels)
    sequence_like = np.array([row[np.newaxis, :] for row in one_hot_rows])
    return [FeatureArray(sequence_like, number_of_dimensions=3)]
def _create_label_data(
    self,
    training_data: TrainingData,
    label_id_dict: Dict[Text, int],
    attribute: Text,
) -> RasaModelData:
    """Create matrix with label_ids encoded in rows as bag of words.

    Find a training example for each label and get the encoded features
    from the corresponding Message object.
    If the features are already computed, fetch them from the message object
    else compute a one hot encoding for the label as the feature vector.
    """
    # Collect one example for each label
    labels_idx_examples = []
    for label_name, idx in label_id_dict.items():
        label_example = self._find_example_for_label(
            label_name, training_data.intent_examples, attribute
        )
        labels_idx_examples.append((idx, label_example))
    # Sort the list of tuples based on label_idx
    labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])
    labels_example = [example for (_, example) in labels_idx_examples]
    # Collect features, precomputed if they exist, else compute on the fly
    if self._check_labels_features_exist(labels_example, attribute):
        (
            sequence_features,
            sentence_features,
        ) = self._extract_labels_precomputed_features(labels_example, attribute)
    else:
        # fall back to one-hot sentence features; no sequence features then
        sequence_features = None
        sentence_features = self._compute_default_label_features(labels_example)
    label_data = RasaModelData()
    label_data.add_features(LABEL, SEQUENCE, sequence_features)
    label_data.add_features(LABEL, SENTENCE, sentence_features)
    if label_data.does_feature_not_exist(
        LABEL, SENTENCE
    ) and label_data.does_feature_not_exist(LABEL, SEQUENCE):
        raise ValueError(
            "No label features are present. Please check your configuration file."
        )
    label_ids = np.array([idx for (idx, _) in labels_idx_examples])
    # explicitly add last dimension to label_ids
    # to track correctly dynamic sequences
    label_data.add_features(
        LABEL_KEY,
        LABEL_SUB_KEY,
        [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
    )
    label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
    return label_data
def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:
    """Select the precomputed default label features for the given label ids.

    Requires ``self._label_data`` to be populated (done in
    ``preprocess_train_data``).
    """
    feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)
    all_label_features = feature_arrays[0]
    # pick out the rows matching label_ids, preserving order
    return [
        FeatureArray(
            np.array([all_label_features[label_id] for label_id in label_ids]),
            number_of_dimensions=all_label_features.number_of_dimensions,
        )
    ]
def _create_model_data(
    self,
    training_data: List[Message],
    label_id_dict: Optional[Dict[Text, int]] = None,
    label_attribute: Optional[Text] = None,
    training: bool = True,
) -> RasaModelData:
    """Prepare data for training and create a RasaModelData object.

    Args:
        training_data: Messages to featurize (a single message at inference).
        label_id_dict: Maps label names to ids (training only).
        label_attribute: Attribute holding the label (e.g. intent), or None.
        training: False at prediction time; labels are then not added.
    """
    from rasa.utils.tensorflow import model_data_utils
    attributes_to_consider = [TEXT]
    if training and self.component_config[INTENT_CLASSIFICATION]:
        # we don't have any intent labels during prediction, just add them during
        # training
        attributes_to_consider.append(label_attribute)
    if (
        training
        and self.component_config[ENTITY_RECOGNITION]
        and self._entity_tag_specs
    ):
        # Add entities as labels only during training and only if there was
        # training data added for entities with DIET configured to predict entities.
        attributes_to_consider.append(ENTITIES)
    if training and label_attribute is not None:
        # only use those training examples that have the label_attribute set
        # during training
        training_data = [
            example for example in training_data if label_attribute in example.data
        ]
    # drop messages without any TEXT features from our configured featurizers
    training_data = [
        message
        for message in training_data
        if message.features_present(
            attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)
        )
    ]
    if not training_data:
        # no training data are present to train
        return RasaModelData()
    (
        features_for_examples,
        sparse_feature_sizes,
    ) = model_data_utils.featurize_training_examples(
        training_data,
        attributes_to_consider,
        entity_tag_specs=self._entity_tag_specs,
        featurizers=self.component_config[FEATURIZERS],
        bilou_tagging=self.component_config[BILOU_FLAG],
    )
    attribute_data, _ = model_data_utils.convert_to_data_format(
        features_for_examples, consider_dialogue_dimension=False
    )
    model_data = RasaModelData(
        label_key=self.label_key, label_sub_key=self.label_sub_key
    )
    model_data.add_data(attribute_data)
    model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
    # Current implementation doesn't yet account for updating sparse
    # feature sizes of label attributes. That's why we remove them.
    sparse_feature_sizes = self._remove_label_sparse_feature_sizes(
        sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute
    )
    model_data.add_sparse_feature_sizes(sparse_feature_sizes)
    self._add_label_features(
        model_data, training_data, label_attribute, label_id_dict, training
    )
    # make sure all keys are in the same order during training and prediction
    # as we rely on the order of key and sub-key when constructing the actual
    # tensors from the model data
    model_data.sort()
    return model_data
@staticmethod
def _remove_label_sparse_feature_sizes(
sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
label_attribute: Optional[Text] = None,
) -> Dict[Text, Dict[Text, List[int]]]:
if label_attribute in sparse_feature_sizes:
del sparse_feature_sizes[label_attribute]
return sparse_feature_sizes
def _add_label_features(
    self,
    model_data: RasaModelData,
    training_data: List[Message],
    label_attribute: Text,
    label_id_dict: Dict[Text, int],
    training: bool = True,
) -> None:
    """Add label ids and label features to ``model_data`` (in place).

    During training the per-example label ids are appended; if the label
    attribute carries no precomputed features, default one-hot features
    are used instead. Finally label-attribute features are renamed to the
    generic LABEL key.
    """
    label_ids = []
    if training and self.component_config[INTENT_CLASSIFICATION]:
        for example in training_data:
            if example.get(label_attribute):
                label_ids.append(label_id_dict[example.get(label_attribute)])
        # explicitly add last dimension to label_ids
        # to track correctly dynamic sequences
        model_data.add_features(
            LABEL_KEY,
            LABEL_SUB_KEY,
            [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
        )
    if (
        label_attribute
        and model_data.does_feature_not_exist(label_attribute, SENTENCE)
        and model_data.does_feature_not_exist(label_attribute, SEQUENCE)
    ):
        # no label features are present, get default features from _label_data
        model_data.add_features(
            LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))
        )
    # as label_attribute can have different values, e.g. INTENT or RESPONSE,
    # copy over the features to the LABEL key to make
    # it easier to access the label features inside the model itself
    model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)
    model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)
    model_data.update_key(label_attribute, MASK, LABEL, MASK)
    model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
# train helpers
def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
    """Prepares data for training.

    Performs sanity checks on training data, extracts encodings for labels.

    Side effects: populates ``self.index_label_id_mapping``,
    ``self._label_data`` and ``self._entity_tag_specs``.
    """
    if self.component_config[BILOU_FLAG]:
        bilou_utils.apply_bilou_schema(training_data)
    label_id_index_mapping = self._label_id_index_mapping(
        training_data, attribute=INTENT
    )
    if not label_id_index_mapping:
        # no labels are present to train
        return RasaModelData()
    self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
    self._label_data = self._create_label_data(
        training_data, label_id_index_mapping, attribute=INTENT
    )
    self._entity_tag_specs = self._create_entity_tag_specs(training_data)
    label_attribute = (
        INTENT if self.component_config[INTENT_CLASSIFICATION] else None
    )
    model_data = self._create_model_data(
        training_data.nlu_examples,
        label_id_index_mapping,
        label_attribute=label_attribute,
    )
    self._check_input_dimension_consistency(model_data)
    return model_data
@staticmethod
def _check_enough_labels(model_data: RasaModelData) -> bool:
    """Whether the data contains at least two distinct label ids."""
    distinct_label_ids = np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))
    return len(distinct_label_ids) >= 2
def train(self, training_data: TrainingData) -> Resource:
    """Train the embedding intent classifier on a data set.

    Skips training (returning the resource unchanged) when there is no
    usable data or fewer than two intent classes. Persists the trained
    model before returning.

    Raises:
        InvalidParameterException: If finetuning was requested without an
            already-instantiated model.
    """
    model_data = self.preprocess_train_data(training_data)
    if model_data.is_empty():
        logger.debug(
            f"Cannot train '{self.__class__.__name__}'. No data was provided. "
            f"Skipping training of the classifier."
        )
        return self._resource
    if not self.model and self.finetune_mode:
        raise rasa.shared.exceptions.InvalidParameterException(
            f"{self.__class__.__name__} was instantiated "
            f"with `model=None` and `finetune_mode=True`. "
            f"This is not a valid combination as the component "
            f"needs an already instantiated and trained model "
            f"to continue training in finetune mode."
        )
    if self.component_config.get(INTENT_CLASSIFICATION):
        if not self._check_enough_labels(model_data):
            logger.error(
                f"Cannot train '{self.__class__.__name__}'. "
                f"Need at least 2 different intent classes. "
                f"Skipping training of classifier."
            )
            return self._resource
    if self.component_config.get(ENTITY_RECOGNITION):
        self.check_correct_entity_annotations(training_data)
    # keep one example for persisting and loading
    self._data_example = model_data.first_data_example()
    if not self.finetune_mode:
        # No pre-trained model to load from. Create a new instance of the model.
        self.model = self._instantiate_model_class(model_data)
        self.model.compile(
            optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])
        )
    else:
        # adapt the loaded model to any changed sparse feature sizes
        self.model.adjust_for_incremental_training(
            data_example=self._data_example,
            new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),
            old_sparse_feature_sizes=self._sparse_feature_sizes,
        )
    self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
    data_generator, validation_data_generator = train_utils.create_data_generators(
        model_data,
        self.component_config[BATCH_SIZES],
        self.component_config[EPOCHS],
        self.component_config[BATCH_STRATEGY],
        self.component_config[EVAL_NUM_EXAMPLES],
        self.component_config[RANDOM_SEED],
    )
    callbacks = train_utils.create_common_callbacks(
        self.component_config[EPOCHS],
        self.component_config[TENSORBOARD_LOG_DIR],
        self.component_config[TENSORBOARD_LOG_LEVEL],
        self.tmp_checkpoint_dir,
    )
    self.model.fit(
        data_generator,
        epochs=self.component_config[EPOCHS],
        validation_data=validation_data_generator,
        validation_freq=self.component_config[EVAL_NUM_EPOCHS],
        callbacks=callbacks,
        verbose=False,
        shuffle=False,  # we use custom shuffle inside data generator
    )
    self.persist()
    return self._resource
# process helpers
def _predict(
    self, message: Message
) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:
    """Run model inference for one message.

    Returns:
        The raw model output, or ``None`` when no model is trained or the
        message yields no usable features.
    """
    if self.model is None:
        logger.debug(
            f"There is no trained model for '{self.__class__.__name__}': The "
            f"component is either not trained or didn't receive enough training "
            f"data."
        )
        return None
    # create session data from message and convert it into a batch of 1
    model_data = self._create_model_data([message], training=False)
    if model_data.is_empty():
        return None
    return self.model.run_inference(model_data)
def _predict_label(
    self, predict_out: Optional[Dict[Text, tf.Tensor]]
) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:
    """Predicts the intent of the provided message.

    Returns:
        Tuple of the top label (name + confidence) and the ranked list of
        candidate labels. Both are empty-ish defaults when ``predict_out``
        is ``None`` or contains no scores.
    """
    label: Dict[Text, Any] = {"name": None, "confidence": 0.0}
    label_ranking = []
    if predict_out is None:
        return label, label_ranking
    message_sim = predict_out["i_scores"]
    message_sim = message_sim.flatten()  # sim is a matrix
    # if X contains all zeros do not predict some label
    # NOTE(review): the comment above predates this check — the code guards
    # against an *empty* score array, not an all-zero one.
    if message_sim.size == 0:
        return label, label_ranking
    # rank the confidences
    ranking_length = self.component_config[RANKING_LENGTH]
    # renormalization only makes sense for softmax confidences (see defaults)
    renormalize = (
        self.component_config[RENORMALIZE_CONFIDENCES]
        and self.component_config[MODEL_CONFIDENCE] == SOFTMAX
    )
    ranked_label_indices, message_sim = train_utils.rank_and_mask(
        message_sim, ranking_length=ranking_length, renormalize=renormalize
    )
    # construct the label and ranking
    casted_message_sim: List[float] = message_sim.tolist()  # np.float to float
    top_label_idx = ranked_label_indices[0]
    label = {
        "name": self.index_label_id_mapping[top_label_idx],
        "confidence": casted_message_sim[top_label_idx],
    }
    ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]
    label_ranking = [
        {"name": self.index_label_id_mapping[label_idx], "confidence": score}
        for label_idx, score in ranking
    ]
    return label, label_ranking
def _predict_entities(
    self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message
) -> List[Dict]:
    """Turn raw model output into entity dicts for the given message.

    Returns the message's pre-existing entities with the newly predicted
    ones appended; empty list when there is no model output.
    """
    if predict_out is None:
        return []
    predicted_tags, confidence_values = train_utils.entity_label_to_tags(
        predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]
    )
    entities = self.convert_predictions_into_entities(
        message.get(TEXT),
        message.get(TOKENS_NAMES[TEXT], []),
        predicted_tags,
        self.split_entities_config,
        confidence_values,
    )
    # tag each entity with this extractor's name
    entities = self.add_extractor_name(entities)
    entities = message.get(ENTITIES, []) + entities
    return entities
def process(self, messages: List[Message]) -> List[Message]:
    """Augments the message with intents, entities, and diagnostic data.

    Mutates each message in place and returns the same list.
    """
    for message in messages:
        out = self._predict(message)
        if self.component_config[INTENT_CLASSIFICATION]:
            label, label_ranking = self._predict_label(out)
            message.set(INTENT, label, add_to_output=True)
            message.set("intent_ranking", label_ranking, add_to_output=True)
        if self.component_config[ENTITY_RECOGNITION]:
            entities = self._predict_entities(out, message)
            message.set(ENTITIES, entities, add_to_output=True)
        # attach raw diagnostic output (e.g. attention weights) when requested
        if out and self._execution_context.should_add_diagnostic_data:
            message.add_diagnostic_data(
                self._execution_context.node_name, out.get(DIAGNOSTIC_DATA)
            )
    return messages
def persist(self) -> None:
    """Persist this model into the passed directory.

    Writes the tf model plus the pickled/JSON artifacts needed by
    ``_load_from_files``. No-op when no model was trained.
    """
    if self.model is None:
        return None
    with self._model_storage.write_to(self._resource) as model_path:
        file_name = self.__class__.__name__
        tf_model_file = model_path / f"{file_name}.tf_model"
        rasa.shared.utils.io.create_directory_for_file(tf_model_file)
        if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:
            # restore the best checkpointed weights before saving
            self.model.load_weights(self.tmp_checkpoint_dir / "checkpoint.tf_model")
            # Save an empty file to flag that this model has been
            # produced using checkpointing
            checkpoint_marker = model_path / f"{file_name}.from_checkpoint.pkl"
            checkpoint_marker.touch()
        self.model.save(str(tf_model_file))
        io_utils.pickle_dump(
            model_path / f"{file_name}.data_example.pkl", self._data_example
        )
        io_utils.pickle_dump(
            model_path / f"{file_name}.sparse_feature_sizes.pkl",
            self._sparse_feature_sizes,
        )
        io_utils.pickle_dump(
            model_path / f"{file_name}.label_data.pkl", dict(self._label_data.data)
        )
        io_utils.json_pickle(
            model_path / f"{file_name}.index_label_id_mapping.json",
            self.index_label_id_mapping,
        )
        entity_tag_specs = (
            [tag_spec._asdict() for tag_spec in self._entity_tag_specs]
            if self._entity_tag_specs
            else []
        )
        rasa.shared.utils.io.dump_obj_as_json_to_file(
            model_path / f"{file_name}.entity_tag_specs.json", entity_tag_specs
        )
@classmethod
def load(
    cls,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    **kwargs: Any,
) -> DIETClassifier:
    """Loads a policy from the storage (see parent class for full docstring).

    Falls back to a fresh, untrained instance when the resource cannot be
    read (e.g. training was skipped and nothing was persisted).
    """
    try:
        with model_storage.read_from(resource) as model_path:
            return cls._load(
                model_path, config, model_storage, resource, execution_context
            )
    except ValueError:
        # BUGFIX: was `cls.__class__.__name__`, which is the *metaclass* name
        # (e.g. "ABCMeta"), not this component's name. `cls` is already the
        # class object, so `cls.__name__` is correct.
        logger.debug(
            f"Failed to load {cls.__name__} from model storage. Resource "
            f"'{resource.name}' doesn't exist."
        )
        return cls(config, model_storage, resource, execution_context)
@classmethod
def _load(
    cls,
    model_path: Path,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
) -> "DIETClassifier":
    """Loads the trained model from the provided directory."""
    (
        index_label_id_mapping,
        entity_tag_specs,
        label_data,
        data_example,
        sparse_feature_sizes,
    ) = cls._load_from_files(model_path)
    # apply the same config normalization that training applied
    config = train_utils.update_confidence_type(config)
    config = train_utils.update_similarity_type(config)
    model = cls._load_model(
        entity_tag_specs,
        label_data,
        config,
        data_example,
        model_path,
        finetune_mode=execution_context.is_finetuning,
    )
    return cls(
        config=config,
        model_storage=model_storage,
        resource=resource,
        execution_context=execution_context,
        index_label_id_mapping=index_label_id_mapping,
        entity_tag_specs=entity_tag_specs,
        model=model,
        sparse_feature_sizes=sparse_feature_sizes,
    )
@classmethod
def _load_from_files(
    cls, model_path: Path
) -> Tuple[
    Dict[int, Text],
    List[EntityTagSpec],
    RasaModelData,
    Dict[Text, Dict[Text, List[FeatureArray]]],
    Dict[Text, Dict[Text, List[int]]],
]:
    """Load the artifacts written by ``persist`` back from disk.

    Returns:
        Tuple of (index->label mapping, entity tag specs, label data,
        data example, sparse feature sizes).
    """
    file_name = cls.__name__
    data_example = io_utils.pickle_load(
        model_path / f"{file_name}.data_example.pkl"
    )
    label_data = io_utils.pickle_load(model_path / f"{file_name}.label_data.pkl")
    label_data = RasaModelData(data=label_data)
    sparse_feature_sizes = io_utils.pickle_load(
        model_path / f"{file_name}.sparse_feature_sizes.pkl"
    )
    index_label_id_mapping = io_utils.json_unpickle(
        model_path / f"{file_name}.index_label_id_mapping.json"
    )
    entity_tag_specs = rasa.shared.utils.io.read_json_file(
        model_path / f"{file_name}.entity_tag_specs.json"
    )
    # rebuild EntityTagSpec namedtuples; JSON turned the int keys into strings
    entity_tag_specs = [
        EntityTagSpec(
            tag_name=tag_spec["tag_name"],
            ids_to_tags={
                int(key): value for key, value in tag_spec["ids_to_tags"].items()
            },
            tags_to_ids={
                key: int(value) for key, value in tag_spec["tags_to_ids"].items()
            },
            num_tags=tag_spec["num_tags"],
        )
        for tag_spec in entity_tag_specs
    ]
    # jsonpickle converts dictionary keys to strings
    index_label_id_mapping = {
        int(key): value for key, value in index_label_id_mapping.items()
    }
    return (
        index_label_id_mapping,
        entity_tag_specs,
        label_data,
        data_example,
        sparse_feature_sizes,
    )
@classmethod
def _load_model(
    cls,
    entity_tag_specs: List[EntityTagSpec],
    label_data: RasaModelData,
    config: Dict[Text, Any],
    data_example: Dict[Text, Dict[Text, List[FeatureArray]]],
    model_path: Path,
    finetune_mode: bool = False,
) -> "RasaModel":
    """Reconstruct the tf model from the persisted weights and data example."""
    file_name = cls.__name__
    tf_model_file = model_path / f"{file_name}.tf_model"
    # label keys only apply when intent classification is on (mirrors the
    # label_key/label_sub_key properties)
    label_key = LABEL_KEY if config[INTENT_CLASSIFICATION] else None
    label_sub_key = LABEL_SUB_KEY if config[INTENT_CLASSIFICATION] else None
    model_data_example = RasaModelData(
        label_key=label_key, label_sub_key=label_sub_key, data=data_example
    )
    model = cls._load_model_class(
        tf_model_file,
        model_data_example,
        label_data,
        entity_tag_specs,
        config,
        finetune_mode=finetune_mode,
    )
    return model
@classmethod
def _load_model_class(
    cls,
    tf_model_file: Text,
    model_data_example: RasaModelData,
    label_data: RasaModelData,
    entity_tag_specs: List[EntityTagSpec],
    config: Dict[Text, Any],
    finetune_mode: bool,
) -> "RasaModel":
    """Call the model class' load with training- and prediction-shaped data."""
    # prediction only uses TEXT features, so strip everything else
    predict_data_example = RasaModelData(
        label_key=model_data_example.label_key,
        data={
            feature_name: features
            for feature_name, features in model_data_example.items()
            if TEXT in feature_name
        },
    )
    return cls.model_class().load(
        tf_model_file,
        model_data_example,
        predict_data_example,
        data_signature=model_data_example.get_signature(),
        label_data=label_data,
        entity_tag_specs=entity_tag_specs,
        # deep-copy so the model cannot mutate this component's config
        config=copy.deepcopy(config),
        finetune_mode=finetune_mode,
    )
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
    """Create a fresh (untrained) model instance for the given data signature."""
    return self.model_class()(
        data_signature=model_data.get_signature(),
        label_data=self._label_data,
        entity_tag_specs=self._entity_tag_specs,
        config=self.component_config,
    )
class DIET(TransformerRasaModel):
def __init__(
    self,
    data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
    label_data: RasaModelData,
    entity_tag_specs: Optional[List[EntityTagSpec]],
    config: Dict[Text, Any],
) -> None:
    """Initialize the DIET tf model.

    Args:
        data_signature: Signature (per attribute/feature kind) of the data.
        label_data: Featurized representation of all candidate labels.
        entity_tag_specs: Entity tag specifications, if entities are trained.
        config: Component configuration.
    """
    # create entity tag spec before calling super otherwise building the model
    # will fail
    # NOTE(review): the comment above contradicts the code — the specs are
    # assigned *after* super().__init__() here; confirm the intended order.
    super().__init__("DIET", config, data_signature, label_data)
    self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)
    # prediction only consumes TEXT features
    self.predict_data_signature = {
        feature_name: features
        for feature_name, features in data_signature.items()
        if TEXT in feature_name
    }
    # tf training
    self._create_metrics()
    self._update_metrics_to_log()
    # needed for efficient prediction
    self.all_labels_embed: Optional[tf.Tensor] = None
    self._prepare_layers()
@staticmethod
def _ordered_tag_specs(
    entity_tag_specs: Optional[List[EntityTagSpec]],
) -> List[EntityTagSpec]:
    """Ensure that order of entity tag specs matches CRF layer order."""
    if entity_tag_specs is None:
        return []
    crf_order = [
        ENTITY_ATTRIBUTE_TYPE,
        ENTITY_ATTRIBUTE_ROLE,
        ENTITY_ATTRIBUTE_GROUP,
    ]
    # keep only specs whose tag name appears in crf_order, sorted by it
    return [
        tag_spec
        for tag_name in crf_order
        for tag_spec in entity_tag_specs
        if tag_spec.tag_name == tag_name
    ]
def _check_data(self) -> None:
    """Validate the data signature against the configured training tasks.

    Raises on missing text/label features or mismatched shared-layer
    signatures; silently disables entity recognition when no entity
    labels are present in the data.
    """
    if TEXT not in self.data_signature:
        raise InvalidConfigException(
            f"No text features specified. "
            f"Cannot train '{self.__class__.__name__}' model."
        )
    if self.config[INTENT_CLASSIFICATION]:
        if LABEL not in self.data_signature:
            raise InvalidConfigException(
                f"No label features specified. "
                f"Cannot train '{self.__class__.__name__}' model."
            )
        if self.config[SHARE_HIDDEN_LAYERS]:
            different_sentence_signatures = False
            different_sequence_signatures = False
            if (
                SENTENCE in self.data_signature[TEXT]
                and SENTENCE in self.data_signature[LABEL]
            ):
                different_sentence_signatures = (
                    self.data_signature[TEXT][SENTENCE]
                    != self.data_signature[LABEL][SENTENCE]
                )
            if (
                SEQUENCE in self.data_signature[TEXT]
                and SEQUENCE in self.data_signature[LABEL]
            ):
                different_sequence_signatures = (
                    self.data_signature[TEXT][SEQUENCE]
                    != self.data_signature[LABEL][SEQUENCE]
                )
            if different_sentence_signatures or different_sequence_signatures:
                raise ValueError(
                    "If hidden layer weights are shared, data signatures "
                    "for text_features and label_features must coincide."
                )
    if self.config[ENTITY_RECOGNITION] and (
        ENTITIES not in self.data_signature
        or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]
    ):
        logger.debug(
            f"You specified '{self.__class__.__name__}' to train entities, but "
            f"no entities are present in the training data. Skipping training of "
            f"entities."
        )
        # note: mutates the config so downstream layers skip entity heads
        self.config[ENTITY_RECOGNITION] = False
def _create_metrics(self) -> None:
    """Create the keras Mean metrics tracked during training."""
    # self.metrics will have the same order as they are created
    # so create loss metrics first to output losses first
    self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
    self.intent_loss = tf.keras.metrics.Mean(name="i_loss")
    self.entity_loss = tf.keras.metrics.Mean(name="e_loss")
    self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss")
    self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss")
    # create accuracy metrics second to output accuracies second
    self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
    self.intent_acc = tf.keras.metrics.Mean(name="i_acc")
    self.entity_f1 = tf.keras.metrics.Mean(name="e_f1")
    self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1")
    self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1")
def _update_metrics_to_log(self) -> None:
debug_log_level = logging.getLogger("rasa").level == logging.DEBUG
if self.config[MASKED_LM]:
self.metrics_to_log.append("m_acc")
if debug_log_level:
self.metrics_to_log.append("m_loss")
if self.config[INTENT_CLASSIFICATION]:
self.metrics_to_log.append("i_acc")
if debug_log_level:
self.metrics_to_log.append("i_loss")
if self.config[ENTITY_RECOGNITION]:
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags != 0:
name = tag_spec.tag_name
self.metrics_to_log.append(f"{name[0]}_f1")
if debug_log_level:
self.metrics_to_log.append(f"{name[0]}_loss")
self._log_metric_info()
def _log_metric_info(self) -> None:
metric_name = {
"t": "total",
"i": "intent",
"e": "entity",
"m": "mask",
"r": "role",
"g": "group",
}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
    def _prepare_layers(self) -> None:
        """Instantiates all trainable layers of the model.

        Builds (1) the text sequence layer (feature combining + transformer),
        (2) optional masked-LM layers, (3) label feature-combining and FFNN
        layers plus the classification layers for intents, and (4) entity
        recognition layers — each only when enabled in the config.
        """
        # For user text, prepare layers that combine different feature types, embed
        # everything using a transformer and optionally also do masked language
        # modeling.
        self.text_name = TEXT
        self._tf_layers[
            f"sequence_layer.{self.text_name}"
        ] = rasa_layers.RasaSequenceLayer(
            self.text_name, self.data_signature[self.text_name], self.config
        )
        if self.config[MASKED_LM]:
            self._prepare_mask_lm_loss(self.text_name)
        # Intent labels are treated similarly to user text but without the transformer,
        # without masked language modelling, and with no dropout applied to the
        # individual features, only to the overall label embedding after all label
        # features have been combined.
        if self.config[INTENT_CLASSIFICATION]:
            # when hidden layers are shared, the label branch reuses the TEXT layers
            self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL
            # disable input dropout applied to sparse and dense label features
            label_config = self.config.copy()
            label_config.update(
                {SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}
            )
            self._tf_layers[
                f"feature_combining_layer.{self.label_name}"
            ] = rasa_layers.RasaFeatureCombiningLayer(
                self.label_name, self.label_signature[self.label_name], label_config
            )
            self._prepare_ffnn_layer(
                self.label_name,
                self.config[HIDDEN_LAYERS_SIZES][self.label_name],
                self.config[DROP_RATE],
            )
            self._prepare_label_classification_layers(predictor_attribute=TEXT)
        if self.config[ENTITY_RECOGNITION]:
            self._prepare_entity_recognition_layers()
def _prepare_mask_lm_loss(self, name: Text) -> None:
# for embedding predicted tokens at masked positions
self._prepare_embed_layers(f"{name}_lm_mask")
# for embedding the true tokens that got masked
self._prepare_embed_layers(f"{name}_golden_token")
# mask loss is additional loss
# set scaling to False, so that it doesn't overpower other losses
self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False)
def _create_bow(
self,
sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sequence_feature_lengths: tf.Tensor,
name: Text,
) -> tf.Tensor:
x, _ = self._tf_layers[f"feature_combining_layer.{name}"](
(sequence_features, sentence_features, sequence_feature_lengths),
training=self._training,
)
# convert to bag-of-words by summing along the sequence dimension
x = tf.reduce_sum(x, axis=1)
return self._tf_layers[f"ffnn.{name}"](x, self._training)
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]
sequence_feature_lengths = self._get_sequence_feature_lengths(
self.tf_label_data, LABEL
)
x = self._create_bow(
self.tf_label_data[LABEL][SEQUENCE],
self.tf_label_data[LABEL][SENTENCE],
sequence_feature_lengths,
self.label_name,
)
all_labels_embed = self._tf_layers[f"embed.{LABEL}"](x)
return all_label_ids, all_labels_embed
    def _mask_loss(
        self,
        outputs: tf.Tensor,
        inputs: tf.Tensor,
        seq_ids: tf.Tensor,
        mlm_mask_boolean: tf.Tensor,
        name: Text,
    ) -> Tuple[tf.Tensor, tf.Tensor]:
        """Computes the masked-language-model loss over the masked positions.

        Args:
            outputs: transformer outputs of shape
                (batch_size, sequence_length, units), used to predict the
                masked tokens.
            inputs: pre-transformer token representations of the true
                ("golden") tokens.
            seq_ids: per-token ids used as MLM labels.
            mlm_mask_boolean: boolean tensor marking the masked positions.
            name: prefix for the embed/loss layer keys (e.g. "text").

        Returns:
            The (loss, accuracy) pair produced by the dot-product loss layer
            (note: the caller `batch_loss` unpacks two values).
        """
        # make sure there is at least one element in the mask
        mlm_mask_boolean = tf.cond(
            tf.reduce_any(mlm_mask_boolean),
            lambda: mlm_mask_boolean,
            lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),
        )
        mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)
        # Pick elements that were masked, throwing away the batch & sequence dimension
        # and effectively switching from shape (batch_size, sequence_length, units) to
        # (num_masked_elements, units).
        outputs = tf.boolean_mask(outputs, mlm_mask_boolean)
        inputs = tf.boolean_mask(inputs, mlm_mask_boolean)
        ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)
        tokens_predicted_embed = self._tf_layers[f"embed.{name}_lm_mask"](outputs)
        tokens_true_embed = self._tf_layers[f"embed.{name}_golden_token"](inputs)
        # To limit the otherwise computationally expensive loss calculation, we
        # constrain the label space in MLM (i.e. token space) to only those tokens that
        # were masked in this batch. Hence the reduced list of token embeddings
        # (tokens_true_embed) and the reduced list of labels (ids) are passed as
        # all_labels_embed and all_labels, respectively. In the future, we could be less
        # restrictive and construct a slightly bigger label space which could include
        # tokens not masked in the current batch too.
        return self._tf_layers[f"loss.{name}_mask"](
            inputs_embed=tokens_predicted_embed,
            labels_embed=tokens_true_embed,
            labels=ids,
            all_labels_embed=tokens_true_embed,
            all_labels=ids,
        )
def _calculate_label_loss(
self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor
) -> tf.Tensor:
all_label_ids, all_labels_embed = self._create_all_labels()
text_embed = self._tf_layers[f"embed.{TEXT}"](text_features)
label_embed = self._tf_layers[f"embed.{LABEL}"](label_features)
return self._tf_layers[f"loss.{LABEL}"](
text_embed, label_embed, label_ids, all_labels_embed, all_label_ids
)
    def batch_loss(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> tf.Tensor:
        """Calculates the loss for the given batch.

        Sums the masked-LM, intent-classification and entity-recognition
        losses for whichever of those tasks are enabled in the config.

        Args:
            batch_in: The batch.

        Returns:
            The loss of the given batch.
        """
        tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
        sequence_feature_lengths = self._get_sequence_feature_lengths(
            tf_batch_data, TEXT
        )
        # run the shared feature-combining + transformer stack over the text
        (
            text_transformed,
            text_in,
            mask_combined_sequence_sentence,
            text_seq_ids,
            mlm_mask_boolean_text,
            _,
        ) = self._tf_layers[f"sequence_layer.{self.text_name}"](
            (
                tf_batch_data[TEXT][SEQUENCE],
                tf_batch_data[TEXT][SENTENCE],
                sequence_feature_lengths,
            ),
            training=self._training,
        )
        losses = []
        # Lengths of sequences in case of sentence-level features are always 1, but they
        # can effectively be 0 if sentence-level features aren't present.
        sentence_feature_lengths = self._get_sentence_feature_lengths(
            tf_batch_data, TEXT
        )
        combined_sequence_sentence_feature_lengths = (
            sequence_feature_lengths + sentence_feature_lengths
        )
        if self.config[MASKED_LM]:
            loss, acc = self._mask_loss(
                text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT
            )
            self.mask_loss.update_state(loss)
            self.mask_acc.update_state(acc)
            losses.append(loss)
        if self.config[INTENT_CLASSIFICATION]:
            loss = self._batch_loss_intent(
                combined_sequence_sentence_feature_lengths,
                text_transformed,
                tf_batch_data,
            )
            losses.append(loss)
        if self.config[ENTITY_RECOGNITION]:
            losses += self._batch_loss_entities(
                mask_combined_sequence_sentence,
                sequence_feature_lengths,
                text_transformed,
                tf_batch_data,
            )
        return tf.math.add_n(losses)
def _batch_loss_intent(
self,
combined_sequence_sentence_feature_lengths_text: tf.Tensor,
text_transformed: tf.Tensor,
tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> tf.Tensor:
# get sentence features vector for intent classification
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths_text
)
sequence_feature_lengths_label = self._get_sequence_feature_lengths(
tf_batch_data, LABEL
)
label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
label = self._create_bow(
tf_batch_data[LABEL][SEQUENCE],
tf_batch_data[LABEL][SENTENCE],
sequence_feature_lengths_label,
self.label_name,
)
loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)
self._update_label_metrics(loss, acc)
return loss
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
self.intent_loss.update_state(loss)
self.intent_acc.update_state(acc)
    def _batch_loss_entities(
        self,
        mask_combined_sequence_sentence: tf.Tensor,
        sequence_feature_lengths: tf.Tensor,
        text_transformed: tf.Tensor,
        tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
    ) -> List[tf.Tensor]:
        """Computes the CRF losses for all trained entity tag sets.

        Returns one loss tensor per tag set (type/role/group) whose CRF was
        trained (`num_tags > 0`). The gold entity-type tags are fed one-hot
        encoded into the role and group CRFs as additional input.
        """
        losses = []
        # one-hot encoded gold type tags; stays None until the type tag set
        # has been processed
        entity_tags = None
        for tag_spec in self._entity_tag_specs:
            if tag_spec.num_tags == 0:
                continue
            tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]
            # add a zero (no entity) for the sentence features to match the shape of
            # inputs
            tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])
            loss, f1, _logits = self._calculate_entity_loss(
                text_transformed,
                tag_ids,
                mask_combined_sequence_sentence,
                sequence_feature_lengths,
                tag_spec.tag_name,
                entity_tags,
            )
            if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:
                # use the entity tags as additional input for the role
                # and group CRF
                entity_tags = tf.one_hot(
                    tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags
                )
            self._update_entity_metrics(loss, f1, tag_spec.tag_name)
            losses.append(loss)
        return losses
def _update_entity_metrics(
self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text
) -> None:
if tag_name == ENTITY_ATTRIBUTE_TYPE:
self.entity_loss.update_state(loss)
self.entity_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
self.entity_group_loss.update_state(loss)
self.entity_group_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_ROLE:
self.entity_role_loss.update_state(loss)
self.entity_role_f1.update_state(f1)
def prepare_for_predict(self) -> None:
"""Prepares the model for prediction."""
if self.config[INTENT_CLASSIFICATION]:
_, self.all_labels_embed = self._create_all_labels()
    def batch_predict(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> Dict[Text, tf.Tensor]:
        """Predicts the output of the given batch.

        Args:
            batch_in: The batch.

        Returns:
            The output to predict: diagnostic data (attention weights and
            transformer output), plus intent scores and/or entity tag
            predictions depending on the enabled tasks.
        """
        tf_batch_data = self.batch_to_model_data_format(
            batch_in, self.predict_data_signature
        )
        sequence_feature_lengths = self._get_sequence_feature_lengths(
            tf_batch_data, TEXT
        )
        sentence_feature_lengths = self._get_sentence_feature_lengths(
            tf_batch_data, TEXT
        )
        # run the shared feature-combining + transformer stack over the text
        text_transformed, _, _, _, _, attention_weights = self._tf_layers[
            f"sequence_layer.{self.text_name}"
        ](
            (
                tf_batch_data[TEXT][SEQUENCE],
                tf_batch_data[TEXT][SENTENCE],
                sequence_feature_lengths,
            ),
            training=self._training,
        )
        predictions = {
            DIAGNOSTIC_DATA: {
                "attention_weights": attention_weights,
                "text_transformed": text_transformed,
            }
        }
        if self.config[INTENT_CLASSIFICATION]:
            predictions.update(
                self._batch_predict_intents(
                    sequence_feature_lengths + sentence_feature_lengths,
                    text_transformed,
                )
            )
        if self.config[ENTITY_RECOGNITION]:
            predictions.update(
                self._batch_predict_entities(sequence_feature_lengths, text_transformed)
            )
        return predictions
    def _batch_predict_entities(
        self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor
    ) -> Dict[Text, tf.Tensor]:
        """Predicts entity tag ids and confidences with the trained CRF layers.

        The predicted type tags are additionally fed (one-hot encoded) into
        the role and group CRFs. Returns a dict with keys
        "e_<tag_name>_ids" and "e_<tag_name>_scores" per trained tag set.
        """
        predictions: Dict[Text, tf.Tensor] = {}
        # one-hot encoded predicted type tags; stays None until the type tag
        # set has been processed
        entity_tags = None
        for tag_spec in self._entity_tag_specs:
            # skip crf layer if it was not trained
            if tag_spec.num_tags == 0:
                continue
            name = tag_spec.tag_name
            _input = text_transformed
            if entity_tags is not None:
                _tags = self._tf_layers[f"embed.{name}.tags"](entity_tags)
                _input = tf.concat([_input, _tags], axis=-1)
            _logits = self._tf_layers[f"embed.{name}.logits"](_input)
            pred_ids, confidences = self._tf_layers[f"crf.{name}"](
                _logits, sequence_feature_lengths
            )
            predictions[f"e_{name}_ids"] = pred_ids
            predictions[f"e_{name}_scores"] = confidences
            if name == ENTITY_ATTRIBUTE_TYPE:
                # use the entity tags as additional input for the role
                # and group CRF
                entity_tags = tf.one_hot(
                    tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags
                )
        return predictions
def _batch_predict_intents(
self,
combined_sequence_sentence_feature_lengths: tf.Tensor,
text_transformed: tf.Tensor,
) -> Dict[Text, tf.Tensor]:
if self.all_labels_embed is None:
raise ValueError(
"The model was not prepared for prediction. "
"Call `prepare_for_predict` first."
)
# get sentence feature vector for intent classification
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths
)
sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
_, scores = self._tf_layers[
f"loss.{LABEL}"
].get_similarities_and_confidences_from_embeddings(
sentence_vector_embed[:, tf.newaxis, :],
self.all_labels_embed[tf.newaxis, :, :],
)
return {"i_scores": scores}
|
batch_predict
|
Predicts the output of the given batch.
Args:
batch_in: The batch.
Returns:
The output to predict.
|
from __future__ import annotations
import copy
import logging
from collections import defaultdict
from pathlib import Path
from rasa.nlu.featurizers.featurizer import Featurizer
import numpy as np
import scipy.sparse
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.nlu.classifiers.classifier import IntentClassifier
import rasa.shared.utils.io
import rasa.utils.io as io_utils
import rasa.nlu.utils.bilou_utils as bilou_utils
from rasa.shared.constants import DIAGNOSTIC_DATA
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.utils import train_utils
from rasa.utils.tensorflow import rasa_layers
from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
from rasa.utils.tensorflow.model_data import (
RasaModelData,
FeatureSignature,
FeatureArray,
)
from rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
TEXT,
INTENT,
INTENT_RESPONSE_KEY,
ENTITIES,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
NO_ENTITY_TAG,
SPLIT_ENTITIES_BY_COMMA,
)
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.tensorflow.constants import (
LABEL,
IDS,
HIDDEN_LAYERS_SIZES,
RENORMALIZE_CONFIDENCES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
CONNECTION_DENSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
AUTO,
BALANCED,
CROSS_ENTROPY,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
CHECKPOINT_MODEL,
SEQUENCE,
SENTENCE,
SEQUENCE_LENGTH,
DENSE_DIMENSION,
MASK,
CONSTRAIN_SIMILARITIES,
MODEL_CONFIDENCE,
SOFTMAX,
)
logger = logging.getLogger(__name__)
# Prefixes used to build feature-dict keys, e.g. "sparse_sentence" (see
# DIETClassifier._extract_features).
SPARSE = "sparse"
DENSE = "dense"
# Key and sub-key under which label ids are stored in the model data.
LABEL_KEY = LABEL
LABEL_SUB_KEY = IDS
# Entity tag sets a DIET model may learn (entity type, role, group).
POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
@DefaultV1Recipe.register(
[
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
],
is_trainable=True,
)
class DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):
"""A multi-task model for intent classification and entity extraction.
DIET is Dual Intent and Entity Transformer.
The architecture is based on a transformer which is shared for both tasks.
A sequence of entity labels is predicted through a Conditional Random Field (CRF)
tagging layer on top of the transformer output sequence corresponding to the
input sequence of tokens. The transformer output for the ``__CLS__`` token and
intent labels are embedded into a single semantic vector space. We use the
dot-product loss to maximize the similarity with the target label and minimize
similarities with negative samples.
"""
    @classmethod
    def required_components(cls) -> List[Type]:
        """Components that should be included in the pipeline before this component.

        DIET consumes precomputed message features, so at least one
        `Featurizer` must run earlier in the pipeline.
        """
        return [Featurizer]
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        """The component's default config (see parent class for full docstring)."""
        # please make sure to update the docs when changing a default parameter
        return {
            # ## Architecture of the used neural network
            # Hidden layer sizes for layers before the embedding layers for user message
            # and labels.
            # The number of hidden layers is equal to the length of the corresponding
            # list.
            HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},
            # Whether to share the hidden layer weights between user message and labels.
            SHARE_HIDDEN_LAYERS: False,
            # Number of units in transformer
            TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,
            # Number of transformer layers
            NUM_TRANSFORMER_LAYERS: 2,
            # Number of attention heads in transformer
            NUM_HEADS: 4,
            # If 'True' use key relative embeddings in attention
            KEY_RELATIVE_ATTENTION: False,
            # If 'True' use value relative embeddings in attention
            VALUE_RELATIVE_ATTENTION: False,
            # Max position for relative embeddings. Only in effect if key- or value
            # relative attention are turned on
            MAX_RELATIVE_POSITION: 5,
            # Use a unidirectional or bidirectional encoder.
            UNIDIRECTIONAL_ENCODER: False,
            # ## Training parameters
            # Initial and final batch sizes:
            # Batch size will be linearly increased for each epoch.
            BATCH_SIZES: [64, 256],
            # Strategy used when creating batches.
            # Can be either 'sequence' or 'balanced'.
            BATCH_STRATEGY: BALANCED,
            # Number of epochs to train
            EPOCHS: 300,
            # Set random seed to any 'int' to get reproducible results
            RANDOM_SEED: None,
            # Initial learning rate for the optimizer
            LEARNING_RATE: 0.001,
            # ## Parameters for embeddings
            # Dimension size of embedding vectors
            EMBEDDING_DIMENSION: 20,
            # Dense dimension to use for sparse features.
            DENSE_DIMENSION: {TEXT: 128, LABEL: 20},
            # Default dimension to use for concatenating sequence and sentence features.
            CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},
            # The number of incorrect labels. The algorithm will minimize
            # their similarity to the user input during training.
            NUM_NEG: 20,
            # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
            SIMILARITY_TYPE: AUTO,
            # The type of the loss function, either 'cross_entropy' or 'margin'.
            LOSS_TYPE: CROSS_ENTROPY,
            # Number of top intents for which confidences should be reported.
            # Set to 0 if confidences for all intents should be reported.
            RANKING_LENGTH: LABEL_RANKING_LENGTH,
            # Indicates how similar the algorithm should try to make embedding vectors
            # for correct labels.
            # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
            MAX_POS_SIM: 0.8,
            # Maximum negative similarity for incorrect labels.
            # Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
            MAX_NEG_SIM: -0.4,
            # If 'True' the algorithm only minimizes maximum similarity over
            # incorrect intent labels, used only if 'loss_type' is set to 'margin'.
            USE_MAX_NEG_SIM: True,
            # If 'True' scale loss inverse proportionally to the confidence
            # of the correct prediction
            SCALE_LOSS: False,
            # ## Regularization parameters
            # The scale of regularization
            REGULARIZATION_CONSTANT: 0.002,
            # The scale of how important is to minimize the maximum similarity
            # between embeddings of different labels,
            # used only if 'loss_type' is set to 'margin'.
            NEGATIVE_MARGIN_SCALE: 0.8,
            # Dropout rate for encoder
            DROP_RATE: 0.2,
            # Dropout rate for attention
            DROP_RATE_ATTENTION: 0,
            # Fraction of trainable weights in internal layers.
            CONNECTION_DENSITY: 0.2,
            # If 'True' apply dropout to sparse input tensors
            SPARSE_INPUT_DROPOUT: True,
            # If 'True' apply dropout to dense input tensors
            DENSE_INPUT_DROPOUT: True,
            # ## Evaluation parameters
            # How often calculate validation accuracy.
            # Small values may hurt performance.
            EVAL_NUM_EPOCHS: 20,
            # How many examples to use for hold out validation set
            # Large values may hurt performance, e.g. model accuracy.
            # Set to 0 for no validation.
            EVAL_NUM_EXAMPLES: 0,
            # ## Model config
            # If 'True' intent classification is trained and intent predicted.
            INTENT_CLASSIFICATION: True,
            # If 'True' named entity recognition is trained and entities predicted.
            ENTITY_RECOGNITION: True,
            # If 'True' random tokens of the input message will be masked and the model
            # should predict those tokens.
            MASKED_LM: False,
            # 'BILOU_flag' determines whether to use BILOU tagging or not.
            # If set to 'True' labelling is more rigorous, however more
            # examples per entity are required.
            # Rule of thumb: you should have more than 100 examples per entity.
            BILOU_FLAG: True,
            # If you want to use tensorboard to visualize training and validation
            # metrics, set this option to a valid output directory.
            TENSORBOARD_LOG_DIR: None,
            # Define when training metrics for tensorboard should be logged.
            # Either after every epoch or for every training step.
            # Valid values: 'epoch' and 'batch'
            TENSORBOARD_LOG_LEVEL: "epoch",
            # Perform model checkpointing
            CHECKPOINT_MODEL: False,
            # Specify what features to use as sequence and sentence features
            # By default all features in the pipeline are used.
            FEATURIZERS: [],
            # Split entities by comma, this makes sense e.g. for a list of ingredients
            # in a recipe, but it doesn't make sense for the parts of an address
            SPLIT_ENTITIES_BY_COMMA: True,
            # If 'True' applies sigmoid on all similarity terms and adds
            # it to the loss function to ensure that similarity values are
            # approximately bounded. Used inside cross-entropy loss only.
            CONSTRAIN_SIMILARITIES: False,
            # Model confidence to be returned during inference. Currently, the only
            # possible value is `softmax`.
            MODEL_CONFIDENCE: SOFTMAX,
            # Determines whether the confidences of the chosen top intents should be
            # renormalized so that they sum up to 1. By default, we do not renormalize
            # and return the confidences for the top intents as is.
            # Note that renormalization only makes sense if confidences are generated
            # via `softmax`.
            RENORMALIZE_CONFIDENCES: False,
        }
def __init__(
self,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
index_label_id_mapping: Optional[Dict[int, Text]] = None,
entity_tag_specs: Optional[List[EntityTagSpec]] = None,
model: Optional[RasaModel] = None,
sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,
) -> None:
"""Declare instance variables with default values."""
if EPOCHS not in config:
rasa.shared.utils.io.raise_warning(
f"Please configure the number of '{EPOCHS}' in your configuration file."
f" We will change the default value of '{EPOCHS}' in the future to 1. "
)
self.component_config = config
self._model_storage = model_storage
self._resource = resource
self._execution_context = execution_context
self._check_config_parameters()
# transform numbers to labels
self.index_label_id_mapping = index_label_id_mapping or {}
self._entity_tag_specs = entity_tag_specs
self.model = model
self.tmp_checkpoint_dir = None
if self.component_config[CHECKPOINT_MODEL]:
self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
self._label_data: Optional[RasaModelData] = None
self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None
self.split_entities_config = rasa.utils.train_utils.init_split_entities(
self.component_config[SPLIT_ENTITIES_BY_COMMA],
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
)
self.finetune_mode = self._execution_context.is_finetuning
self._sparse_feature_sizes = sparse_feature_sizes
# init helpers
def _check_masked_lm(self) -> None:
if (
self.component_config[MASKED_LM]
and self.component_config[NUM_TRANSFORMER_LAYERS] == 0
):
raise ValueError(
f"If number of transformer layers is 0, "
f"'{MASKED_LM}' option should be 'False'."
)
def _check_share_hidden_layers_sizes(self) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
first_hidden_layer_sizes = next(
iter(self.component_config[HIDDEN_LAYERS_SIZES].values())
)
# check that all hidden layer sizes are the same
identical_hidden_layer_sizes = all(
current_hidden_layer_sizes == first_hidden_layer_sizes
for current_hidden_layer_sizes in self.component_config[
HIDDEN_LAYERS_SIZES
].values()
)
if not identical_hidden_layer_sizes:
raise ValueError(
f"If hidden layer weights are shared, "
f"{HIDDEN_LAYERS_SIZES} must coincide."
)
    def _check_config_parameters(self) -> None:
        """Validates and normalises `self.component_config` in place.

        Each `train_utils` helper below returns a (possibly rewritten) config
        which replaces `self.component_config`, so the steps run in sequence.
        """
        # rewrite deprecated option names/values first
        self.component_config = train_utils.check_deprecated_options(
            self.component_config
        )
        self._check_masked_lm()
        self._check_share_hidden_layers_sizes()
        self.component_config = train_utils.update_confidence_type(
            self.component_config
        )
        train_utils.validate_configuration_settings(self.component_config)
        self.component_config = train_utils.update_similarity_type(
            self.component_config
        )
        self.component_config = train_utils.update_evaluation_parameters(
            self.component_config
        )
    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> DIETClassifier:
        """Creates a new untrained component (see parent class for full docstring).

        Args:
            config: the component configuration.
            model_storage: storage the trained model will be persisted to.
            resource: resource locator within the model storage.
            execution_context: information about the current graph run.

        Returns:
            An untrained `DIETClassifier`.
        """
        return cls(config, model_storage, resource, execution_context)
@property
def label_key(self) -> Optional[Text]:
"""Return key if intent classification is activated."""
return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@property
def label_sub_key(self) -> Optional[Text]:
"""Return sub key if intent classification is activated."""
return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None
    @staticmethod
    def model_class() -> Type[RasaModel]:
        """Returns the `RasaModel` subclass this component trains (`DIET`)."""
        return DIET
# training data helpers:
@staticmethod
def _label_id_index_mapping(
training_data: TrainingData, attribute: Text
) -> Dict[Text, int]:
"""Create label_id dictionary."""
distinct_label_ids = {
example.get(attribute) for example in training_data.intent_examples
} - {None}
return {
label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))
}
@staticmethod
def _invert_mapping(mapping: Dict) -> Dict:
return {value: key for key, value in mapping.items()}
def _create_entity_tag_specs(
self, training_data: TrainingData
) -> List[EntityTagSpec]:
"""Create entity tag specifications with their respective tag id mappings."""
_tag_specs = []
for tag_name in POSSIBLE_TAGS:
if self.component_config[BILOU_FLAG]:
tag_id_index_mapping = bilou_utils.build_tag_id_dict(
training_data, tag_name
)
else:
tag_id_index_mapping = self._tag_id_index_mapping_for(
tag_name, training_data
)
if tag_id_index_mapping:
_tag_specs.append(
EntityTagSpec(
tag_name=tag_name,
tags_to_ids=tag_id_index_mapping,
ids_to_tags=self._invert_mapping(tag_id_index_mapping),
num_tags=len(tag_id_index_mapping),
)
)
return _tag_specs
@staticmethod
def _tag_id_index_mapping_for(
tag_name: Text, training_data: TrainingData
) -> Optional[Dict[Text, int]]:
"""Create mapping from tag name to id."""
if tag_name == ENTITY_ATTRIBUTE_ROLE:
distinct_tags = training_data.entity_roles
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}
if not distinct_tags:
return None
tag_id_dict = {
tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)
}
# NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index
# needed for correct prediction for padding
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
@staticmethod
def _find_example_for_label(
label: Text, examples: List[Message], attribute: Text
) -> Optional[Message]:
for ex in examples:
if ex.get(attribute) == label:
return ex
return None
def _check_labels_features_exist(
self, labels_example: List[Message], attribute: Text
) -> bool:
"""Checks if all labels have features set."""
return all(
label_example.features_present(
attribute, self.component_config[FEATURIZERS]
)
for label_example in labels_example
)
    def _extract_features(
        self, message: Message, attribute: Text
    ) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:
        """Collects sparse/dense sequence and sentence features for one message.

        Returns:
            A dict keyed by "<sparse|dense>_<sequence|sentence>"; each key is
            only present when the corresponding features exist.

        Raises:
            ValueError: If sparse and dense features of the same kind have
                mismatching sequence dimensions.
        """
        (
            sparse_sequence_features,
            sparse_sentence_features,
        ) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])
        dense_sequence_features, dense_sentence_features = message.get_dense_features(
            attribute, self.component_config[FEATURIZERS]
        )
        if dense_sequence_features is not None and sparse_sequence_features is not None:
            if (
                dense_sequence_features.features.shape[0]
                != sparse_sequence_features.features.shape[0]
            ):
                raise ValueError(
                    f"Sequence dimensions for sparse and dense sequence features "
                    f"don't coincide in '{message.get(TEXT)}'"
                    f"for attribute '{attribute}'."
                )
        if dense_sentence_features is not None and sparse_sentence_features is not None:
            if (
                dense_sentence_features.features.shape[0]
                != sparse_sentence_features.features.shape[0]
            ):
                raise ValueError(
                    f"Sequence dimensions for sparse and dense sentence features "
                    f"don't coincide in '{message.get(TEXT)}'"
                    f"for attribute '{attribute}'."
                )
        # If we don't use the transformer and we don't want to do entity recognition,
        # to speed up training take only the sentence features as feature vector.
        # We would not make use of the sequence anyway in this setup. Carrying over
        # those features to the actual training process takes quite some time.
        if (
            self.component_config[NUM_TRANSFORMER_LAYERS] == 0
            and not self.component_config[ENTITY_RECOGNITION]
            and attribute not in [INTENT, INTENT_RESPONSE_KEY]
        ):
            sparse_sequence_features = None
            dense_sequence_features = None
        out = {}
        if sparse_sentence_features is not None:
            out[f"{SPARSE}_{SENTENCE}"] = sparse_sentence_features.features
        if sparse_sequence_features is not None:
            out[f"{SPARSE}_{SEQUENCE}"] = sparse_sequence_features.features
        if dense_sentence_features is not None:
            out[f"{DENSE}_{SENTENCE}"] = dense_sentence_features.features
        if dense_sequence_features is not None:
            out[f"{DENSE}_{SEQUENCE}"] = dense_sequence_features.features
        return out
def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
"""Checks if features have same dimensionality if hidden layers are shared."""
if self.component_config.get(SHARE_HIDDEN_LAYERS):
num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (
0 < num_text_sequence_features != num_label_sequence_features > 0
):
raise ValueError(
"If embeddings are shared text features and label features "
"must coincide. Check the output dimensions of previous components."
)
def _extract_labels_precomputed_features(
self, label_examples: List[Message], attribute: Text = INTENT
) -> Tuple[List[FeatureArray], List[FeatureArray]]:
"""Collects precomputed encodings."""
features = defaultdict(list)
for e in label_examples:
label_features = self._extract_features(e, attribute)
for feature_key, feature_value in label_features.items():
features[feature_key].append(feature_value)
sequence_features = []
sentence_features = []
for feature_name, feature_value in features.items():
if SEQUENCE in feature_name:
sequence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
else:
sentence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
return sequence_features, sentence_features
@staticmethod
def _compute_default_label_features(
labels_example: List[Message],
) -> List[FeatureArray]:
"""Computes one-hot representation for the labels."""
logger.debug("No label features found. Computing default label features.")
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
# add sequence dimension to one-hot labels
return [
FeatureArray(
np.array([np.expand_dims(a, 0) for a in eye_matrix]),
number_of_dimensions=3,
)
]
    def _create_label_data(
        self,
        training_data: TrainingData,
        label_id_dict: Dict[Text, int],
        attribute: Text,
    ) -> RasaModelData:
        """Create matrix with label_ids encoded in rows as bag of words.

        Find a training example for each label and get the encoded features
        from the corresponding Message object.
        If the features are already computed, fetch them from the message object
        else compute a one hot encoding for the label as the feature vector.

        Args:
            training_data: full training data, used to find one example per label.
            label_id_dict: mapping from label name to its numeric id.
            attribute: the message attribute that holds the label (e.g. INTENT).

        Returns:
            A RasaModelData containing label features and label ids.

        Raises:
            ValueError: if neither sentence- nor sequence-level label features
                end up present.
        """
        # Collect one example for each label
        labels_idx_examples = []
        for label_name, idx in label_id_dict.items():
            label_example = self._find_example_for_label(
                label_name, training_data.intent_examples, attribute
            )
            labels_idx_examples.append((idx, label_example))

        # Sort the list of tuples based on label_idx
        labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])
        labels_example = [example for (_, example) in labels_idx_examples]

        # Collect features, precomputed if they exist, else compute on the fly
        if self._check_labels_features_exist(labels_example, attribute):
            (
                sequence_features,
                sentence_features,
            ) = self._extract_labels_precomputed_features(labels_example, attribute)
        else:
            # Fallback: one-hot encode each label as a sentence-level feature.
            sequence_features = None
            sentence_features = self._compute_default_label_features(labels_example)

        label_data = RasaModelData()
        label_data.add_features(LABEL, SEQUENCE, sequence_features)
        label_data.add_features(LABEL, SENTENCE, sentence_features)
        if label_data.does_feature_not_exist(
            LABEL, SENTENCE
        ) and label_data.does_feature_not_exist(LABEL, SEQUENCE):
            raise ValueError(
                "No label features are present. Please check your configuration file."
            )

        label_ids = np.array([idx for (idx, _) in labels_idx_examples])
        # explicitly add last dimension to label_ids
        # to track correctly dynamic sequences
        label_data.add_features(
            LABEL_KEY,
            LABEL_SUB_KEY,
            [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
        )

        label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)

        return label_data
def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:
feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)
all_label_features = feature_arrays[0]
return [
FeatureArray(
np.array([all_label_features[label_id] for label_id in label_ids]),
number_of_dimensions=all_label_features.number_of_dimensions,
)
]
    def _create_model_data(
        self,
        training_data: List[Message],
        label_id_dict: Optional[Dict[Text, int]] = None,
        label_attribute: Optional[Text] = None,
        training: bool = True,
    ) -> RasaModelData:
        """Prepare data for training and create a RasaModelData object.

        Args:
            training_data: messages to featurize.
            label_id_dict: mapping from label name to numeric id (training only).
            label_attribute: attribute holding the label (e.g. INTENT), or None.
            training: whether this call prepares data for training (True) or
                inference (False).

        Returns:
            The assembled RasaModelData; empty if no usable messages remain.
        """
        from rasa.utils.tensorflow import model_data_utils

        attributes_to_consider = [TEXT]
        if training and self.component_config[INTENT_CLASSIFICATION]:
            # we don't have any intent labels during prediction, just add them during
            # training
            attributes_to_consider.append(label_attribute)
        if (
            training
            and self.component_config[ENTITY_RECOGNITION]
            and self._entity_tag_specs
        ):
            # Add entities as labels only during training and only if there was
            # training data added for entities with DIET configured to predict entities.
            attributes_to_consider.append(ENTITIES)

        if training and label_attribute is not None:
            # only use those training examples that have the label_attribute set
            # during training
            training_data = [
                example for example in training_data if label_attribute in example.data
            ]

        # Drop messages that have no (configured) text features at all; they
        # cannot contribute anything to the model input.
        training_data = [
            message
            for message in training_data
            if message.features_present(
                attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)
            )
        ]

        if not training_data:
            # no training data are present to train
            return RasaModelData()

        (
            features_for_examples,
            sparse_feature_sizes,
        ) = model_data_utils.featurize_training_examples(
            training_data,
            attributes_to_consider,
            entity_tag_specs=self._entity_tag_specs,
            featurizers=self.component_config[FEATURIZERS],
            bilou_tagging=self.component_config[BILOU_FLAG],
        )
        attribute_data, _ = model_data_utils.convert_to_data_format(
            features_for_examples, consider_dialogue_dimension=False
        )

        model_data = RasaModelData(
            label_key=self.label_key, label_sub_key=self.label_sub_key
        )
        model_data.add_data(attribute_data)
        model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
        # Current implementation doesn't yet account for updating sparse
        # feature sizes of label attributes. That's why we remove them.
        sparse_feature_sizes = self._remove_label_sparse_feature_sizes(
            sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute
        )
        model_data.add_sparse_feature_sizes(sparse_feature_sizes)

        self._add_label_features(
            model_data, training_data, label_attribute, label_id_dict, training
        )

        # make sure all keys are in the same order during training and prediction
        # as we rely on the order of key and sub-key when constructing the actual
        # tensors from the model data
        model_data.sort()

        return model_data
@staticmethod
def _remove_label_sparse_feature_sizes(
sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
label_attribute: Optional[Text] = None,
) -> Dict[Text, Dict[Text, List[int]]]:
if label_attribute in sparse_feature_sizes:
del sparse_feature_sizes[label_attribute]
return sparse_feature_sizes
    def _add_label_features(
        self,
        model_data: RasaModelData,
        training_data: List[Message],
        label_attribute: Text,
        label_id_dict: Dict[Text, int],
        training: bool = True,
    ) -> None:
        """Adds label ids and label features to `model_data` in place.

        During training, collects the numeric label ids from the examples; if
        no real label features exist for `label_attribute`, default (one-hot)
        features are substituted. Any features stored under `label_attribute`
        are then moved to the generic LABEL key.
        """
        label_ids = []
        if training and self.component_config[INTENT_CLASSIFICATION]:
            for example in training_data:
                if example.get(label_attribute):
                    label_ids.append(label_id_dict[example.get(label_attribute)])

            # explicitly add last dimension to label_ids
            # to track correctly dynamic sequences
            model_data.add_features(
                LABEL_KEY,
                LABEL_SUB_KEY,
                [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
            )

        if (
            label_attribute
            and model_data.does_feature_not_exist(label_attribute, SENTENCE)
            and model_data.does_feature_not_exist(label_attribute, SEQUENCE)
        ):
            # no label features are present, get default features from _label_data
            model_data.add_features(
                LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))
            )

        # as label_attribute can have different values, e.g. INTENT or RESPONSE,
        # copy over the features to the LABEL key to make
        # it easier to access the label features inside the model itself
        model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)
        model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)
        model_data.update_key(label_attribute, MASK, LABEL, MASK)

        model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
# train helpers
    def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
        """Prepares data for training.

        Performs sanity checks on training data, extracts encodings for labels.

        Returns:
            The model-ready data; empty when no labels are present.
        """
        if self.component_config[BILOU_FLAG]:
            # Rewrite entity annotations into BILOU tags before featurization.
            bilou_utils.apply_bilou_schema(training_data)

        label_id_index_mapping = self._label_id_index_mapping(
            training_data, attribute=INTENT
        )

        if not label_id_index_mapping:
            # no labels are present to train
            return RasaModelData()

        self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)

        self._label_data = self._create_label_data(
            training_data, label_id_index_mapping, attribute=INTENT
        )

        self._entity_tag_specs = self._create_entity_tag_specs(training_data)

        label_attribute = (
            INTENT if self.component_config[INTENT_CLASSIFICATION] else None
        )
        model_data = self._create_model_data(
            training_data.nlu_examples,
            label_id_index_mapping,
            label_attribute=label_attribute,
        )

        self._check_input_dimension_consistency(model_data)

        return model_data
@staticmethod
def _check_enough_labels(model_data: RasaModelData) -> bool:
return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2
    def train(self, training_data: TrainingData) -> Resource:
        """Train the embedding intent classifier on a data set.

        Skips training (returning the resource unchanged) when there is no
        data or fewer than two intent classes; otherwise trains a new model
        or continues training the loaded one in finetune mode, then persists.
        """
        model_data = self.preprocess_train_data(training_data)
        if model_data.is_empty():
            logger.debug(
                f"Cannot train '{self.__class__.__name__}'. No data was provided. "
                f"Skipping training of the classifier."
            )
            return self._resource

        if not self.model and self.finetune_mode:
            raise rasa.shared.exceptions.InvalidParameterException(
                f"{self.__class__.__name__} was instantiated "
                f"with `model=None` and `finetune_mode=True`. "
                f"This is not a valid combination as the component "
                f"needs an already instantiated and trained model "
                f"to continue training in finetune mode."
            )

        if self.component_config.get(INTENT_CLASSIFICATION):
            if not self._check_enough_labels(model_data):
                logger.error(
                    f"Cannot train '{self.__class__.__name__}'. "
                    f"Need at least 2 different intent classes. "
                    f"Skipping training of classifier."
                )
                return self._resource
        if self.component_config.get(ENTITY_RECOGNITION):
            self.check_correct_entity_annotations(training_data)

        # keep one example for persisting and loading
        self._data_example = model_data.first_data_example()

        if not self.finetune_mode:
            # No pre-trained model to load from. Create a new instance of the model.
            self.model = self._instantiate_model_class(model_data)
            self.model.compile(
                optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])
            )
        else:
            # Resize layers to match the (possibly grown) sparse feature space
            # before continuing training.
            self.model.adjust_for_incremental_training(
                data_example=self._data_example,
                new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),
                old_sparse_feature_sizes=self._sparse_feature_sizes,
            )
        self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()

        data_generator, validation_data_generator = train_utils.create_data_generators(
            model_data,
            self.component_config[BATCH_SIZES],
            self.component_config[EPOCHS],
            self.component_config[BATCH_STRATEGY],
            self.component_config[EVAL_NUM_EXAMPLES],
            self.component_config[RANDOM_SEED],
        )
        callbacks = train_utils.create_common_callbacks(
            self.component_config[EPOCHS],
            self.component_config[TENSORBOARD_LOG_DIR],
            self.component_config[TENSORBOARD_LOG_LEVEL],
            self.tmp_checkpoint_dir,
        )

        self.model.fit(
            data_generator,
            epochs=self.component_config[EPOCHS],
            validation_data=validation_data_generator,
            validation_freq=self.component_config[EVAL_NUM_EPOCHS],
            callbacks=callbacks,
            verbose=False,
            shuffle=False,  # we use custom shuffle inside data generator
        )

        self.persist()

        return self._resource
# process helpers
def _predict(
self, message: Message
) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:
if self.model is None:
logger.debug(
f"There is no trained model for '{self.__class__.__name__}': The "
f"component is either not trained or didn't receive enough training "
f"data."
)
return None
# create session data from message and convert it into a batch of 1
model_data = self._create_model_data([message], training=False)
if model_data.is_empty():
return None
return self.model.run_inference(model_data)
    def _predict_label(
        self, predict_out: Optional[Dict[Text, tf.Tensor]]
    ) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:
        """Predicts the intent of the provided message.

        Returns:
            The top label (name + confidence) and the possibly-truncated
            confidence ranking over the remaining labels.
        """
        # Default result when no prediction can be made.
        label: Dict[Text, Any] = {"name": None, "confidence": 0.0}
        label_ranking = []

        if predict_out is None:
            return label, label_ranking

        message_sim = predict_out["i_scores"]
        message_sim = message_sim.flatten()  # sim is a matrix

        # no similarity scores were produced -> do not predict a label
        if message_sim.size == 0:
            return label, label_ranking

        # rank the confidences
        ranking_length = self.component_config[RANKING_LENGTH]
        # Re-normalizing truncated confidences only makes sense for softmax.
        renormalize = (
            self.component_config[RENORMALIZE_CONFIDENCES]
            and self.component_config[MODEL_CONFIDENCE] == SOFTMAX
        )
        ranked_label_indices, message_sim = train_utils.rank_and_mask(
            message_sim, ranking_length=ranking_length, renormalize=renormalize
        )

        # construct the label and ranking
        casted_message_sim: List[float] = message_sim.tolist()  # np.float to float
        top_label_idx = ranked_label_indices[0]
        label = {
            "name": self.index_label_id_mapping[top_label_idx],
            "confidence": casted_message_sim[top_label_idx],
        }

        ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]
        label_ranking = [
            {"name": self.index_label_id_mapping[label_idx], "confidence": score}
            for label_idx, score in ranking
        ]

        return label, label_ranking
def _predict_entities(
self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message
) -> List[Dict]:
if predict_out is None:
return []
predicted_tags, confidence_values = train_utils.entity_label_to_tags(
predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]
)
entities = self.convert_predictions_into_entities(
message.get(TEXT),
message.get(TOKENS_NAMES[TEXT], []),
predicted_tags,
self.split_entities_config,
confidence_values,
)
entities = self.add_extractor_name(entities)
entities = message.get(ENTITIES, []) + entities
return entities
    def process(self, messages: List[Message]) -> List[Message]:
        """Augments the message with intents, entities, and diagnostic data."""
        for message in messages:
            # Run the model once per message; both intent and entity
            # predictions are derived from the same raw output.
            out = self._predict(message)

            if self.component_config[INTENT_CLASSIFICATION]:
                label, label_ranking = self._predict_label(out)

                message.set(INTENT, label, add_to_output=True)
                message.set("intent_ranking", label_ranking, add_to_output=True)

            if self.component_config[ENTITY_RECOGNITION]:
                entities = self._predict_entities(out, message)

                message.set(ENTITIES, entities, add_to_output=True)

            if out and self._execution_context.should_add_diagnostic_data:
                message.add_diagnostic_data(
                    self._execution_context.node_name, out.get(DIAGNOSTIC_DATA)
                )

        return messages
    def persist(self) -> None:
        """Persist this model into the passed directory.

        Writes the tf model plus the pickled/JSON side artifacts (data
        example, sparse feature sizes, label data, index->label mapping,
        entity tag specs) needed by `load` to reconstruct the component.
        """
        if self.model is None:
            return None

        with self._model_storage.write_to(self._resource) as model_path:
            file_name = self.__class__.__name__
            tf_model_file = model_path / f"{file_name}.tf_model"

            rasa.shared.utils.io.create_directory_for_file(tf_model_file)

            if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:
                # Restore the best checkpointed weights before saving.
                self.model.load_weights(self.tmp_checkpoint_dir / "checkpoint.tf_model")
                # Save an empty file to flag that this model has been
                # produced using checkpointing
                checkpoint_marker = model_path / f"{file_name}.from_checkpoint.pkl"
                checkpoint_marker.touch()

            self.model.save(str(tf_model_file))

            io_utils.pickle_dump(
                model_path / f"{file_name}.data_example.pkl", self._data_example
            )
            io_utils.pickle_dump(
                model_path / f"{file_name}.sparse_feature_sizes.pkl",
                self._sparse_feature_sizes,
            )
            io_utils.pickle_dump(
                model_path / f"{file_name}.label_data.pkl", dict(self._label_data.data)
            )
            io_utils.json_pickle(
                model_path / f"{file_name}.index_label_id_mapping.json",
                self.index_label_id_mapping,
            )

            entity_tag_specs = (
                [tag_spec._asdict() for tag_spec in self._entity_tag_specs]
                if self._entity_tag_specs
                else []
            )
            rasa.shared.utils.io.dump_obj_as_json_to_file(
                model_path / f"{file_name}.entity_tag_specs.json", entity_tag_specs
            )
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
) -> DIETClassifier:
"""Loads a policy from the storage (see parent class for full docstring)."""
try:
with model_storage.read_from(resource) as model_path:
return cls._load(
model_path, config, model_storage, resource, execution_context
)
except ValueError:
logger.debug(
f"Failed to load {cls.__class__.__name__} from model storage. Resource "
f"'{resource.name}' doesn't exist."
)
return cls(config, model_storage, resource, execution_context)
    @classmethod
    def _load(
        cls,
        model_path: Path,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> "DIETClassifier":
        """Loads the trained model from the provided directory.

        Reads the persisted side artifacts, normalizes the confidence/
        similarity config, restores the tf model, and builds the component.
        """
        (
            index_label_id_mapping,
            entity_tag_specs,
            label_data,
            data_example,
            sparse_feature_sizes,
        ) = cls._load_from_files(model_path)

        # Keep config consistent with what the model was trained with.
        config = train_utils.update_confidence_type(config)
        config = train_utils.update_similarity_type(config)

        model = cls._load_model(
            entity_tag_specs,
            label_data,
            config,
            data_example,
            model_path,
            finetune_mode=execution_context.is_finetuning,
        )

        return cls(
            config=config,
            model_storage=model_storage,
            resource=resource,
            execution_context=execution_context,
            index_label_id_mapping=index_label_id_mapping,
            entity_tag_specs=entity_tag_specs,
            model=model,
            sparse_feature_sizes=sparse_feature_sizes,
        )
    @classmethod
    def _load_from_files(
        cls, model_path: Path
    ) -> Tuple[
        Dict[int, Text],
        List[EntityTagSpec],
        RasaModelData,
        Dict[Text, Dict[Text, List[FeatureArray]]],
        Dict[Text, Dict[Text, List[int]]],
    ]:
        """Reads the persisted side artifacts written by `persist`.

        Returns the index->label mapping, entity tag specs, label data,
        data example, and sparse feature sizes.
        """
        file_name = cls.__name__

        data_example = io_utils.pickle_load(
            model_path / f"{file_name}.data_example.pkl"
        )
        label_data = io_utils.pickle_load(model_path / f"{file_name}.label_data.pkl")
        label_data = RasaModelData(data=label_data)
        sparse_feature_sizes = io_utils.pickle_load(
            model_path / f"{file_name}.sparse_feature_sizes.pkl"
        )
        index_label_id_mapping = io_utils.json_unpickle(
            model_path / f"{file_name}.index_label_id_mapping.json"
        )
        entity_tag_specs = rasa.shared.utils.io.read_json_file(
            model_path / f"{file_name}.entity_tag_specs.json"
        )
        # Rebuild the NamedTuple specs; JSON turned the id keys/values into
        # strings, so cast them back to int.
        entity_tag_specs = [
            EntityTagSpec(
                tag_name=tag_spec["tag_name"],
                ids_to_tags={
                    int(key): value for key, value in tag_spec["ids_to_tags"].items()
                },
                tags_to_ids={
                    key: int(value) for key, value in tag_spec["tags_to_ids"].items()
                },
                num_tags=tag_spec["num_tags"],
            )
            for tag_spec in entity_tag_specs
        ]

        # jsonpickle converts dictionary keys to strings
        index_label_id_mapping = {
            int(key): value for key, value in index_label_id_mapping.items()
        }

        return (
            index_label_id_mapping,
            entity_tag_specs,
            label_data,
            data_example,
            sparse_feature_sizes,
        )
@classmethod
def _load_model(
cls,
entity_tag_specs: List[EntityTagSpec],
label_data: RasaModelData,
config: Dict[Text, Any],
data_example: Dict[Text, Dict[Text, List[FeatureArray]]],
model_path: Path,
finetune_mode: bool = False,
) -> "RasaModel":
file_name = cls.__name__
tf_model_file = model_path / f"{file_name}.tf_model"
label_key = LABEL_KEY if config[INTENT_CLASSIFICATION] else None
label_sub_key = LABEL_SUB_KEY if config[INTENT_CLASSIFICATION] else None
model_data_example = RasaModelData(
label_key=label_key, label_sub_key=label_sub_key, data=data_example
)
model = cls._load_model_class(
tf_model_file,
model_data_example,
label_data,
entity_tag_specs,
config,
finetune_mode=finetune_mode,
)
return model
    @classmethod
    def _load_model_class(
        cls,
        tf_model_file: Text,
        model_data_example: RasaModelData,
        label_data: RasaModelData,
        entity_tag_specs: List[EntityTagSpec],
        config: Dict[Text, Any],
        finetune_mode: bool,
    ) -> "RasaModel":
        """Delegates to the model class's `load`, passing a prediction example.

        The prediction example keeps only TEXT features, since labels are
        not available at inference time.
        """
        predict_data_example = RasaModelData(
            label_key=model_data_example.label_key,
            data={
                feature_name: features
                for feature_name, features in model_data_example.items()
                if TEXT in feature_name
            },
        )

        return cls.model_class().load(
            tf_model_file,
            model_data_example,
            predict_data_example,
            data_signature=model_data_example.get_signature(),
            label_data=label_data,
            entity_tag_specs=entity_tag_specs,
            # deep-copy so the loaded model cannot mutate the caller's config
            config=copy.deepcopy(config),
            finetune_mode=finetune_mode,
        )
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
return self.model_class()(
data_signature=model_data.get_signature(),
label_data=self._label_data,
entity_tag_specs=self._entity_tag_specs,
config=self.component_config,
)
class DIET(TransformerRasaModel):
    def __init__(
        self,
        data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
        label_data: RasaModelData,
        entity_tag_specs: Optional[List[EntityTagSpec]],
        config: Dict[Text, Any],
    ) -> None:
        """Initializes the DIET transformer model.

        Args:
            data_signature: signature of the training data features.
            label_data: precomputed label features/ids.
            entity_tag_specs: CRF tag specs (may be None if no entities).
            config: component configuration.
        """
        # create entity tag spec before calling super otherwise building the model
        # will fail
        super().__init__("DIET", config, data_signature, label_data)
        self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)

        # Only TEXT features are available at prediction time.
        self.predict_data_signature = {
            feature_name: features
            for feature_name, features in data_signature.items()
            if TEXT in feature_name
        }

        # tf training
        self._create_metrics()
        self._update_metrics_to_log()

        # needed for efficient prediction
        self.all_labels_embed: Optional[tf.Tensor] = None

        self._prepare_layers()
@staticmethod
def _ordered_tag_specs(
entity_tag_specs: Optional[List[EntityTagSpec]],
) -> List[EntityTagSpec]:
"""Ensure that order of entity tag specs matches CRF layer order."""
if entity_tag_specs is None:
return []
crf_order = [
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_ROLE,
ENTITY_ATTRIBUTE_GROUP,
]
ordered_tag_spec = []
for tag_name in crf_order:
for tag_spec in entity_tag_specs:
if tag_name == tag_spec.tag_name:
ordered_tag_spec.append(tag_spec)
return ordered_tag_spec
    def _check_data(self) -> None:
        """Validates the data signature against the configuration.

        Raises on missing text/label features or mismatched shared-layer
        signatures; silently disables entity recognition when no entity
        annotations are present.
        """
        if TEXT not in self.data_signature:
            raise InvalidConfigException(
                f"No text features specified. "
                f"Cannot train '{self.__class__.__name__}' model."
            )
        if self.config[INTENT_CLASSIFICATION]:
            if LABEL not in self.data_signature:
                raise InvalidConfigException(
                    f"No label features specified. "
                    f"Cannot train '{self.__class__.__name__}' model."
                )

            if self.config[SHARE_HIDDEN_LAYERS]:
                # Shared layers require text and label signatures to match
                # exactly for each feature level that both sides provide.
                different_sentence_signatures = False
                different_sequence_signatures = False
                if (
                    SENTENCE in self.data_signature[TEXT]
                    and SENTENCE in self.data_signature[LABEL]
                ):
                    different_sentence_signatures = (
                        self.data_signature[TEXT][SENTENCE]
                        != self.data_signature[LABEL][SENTENCE]
                    )
                if (
                    SEQUENCE in self.data_signature[TEXT]
                    and SEQUENCE in self.data_signature[LABEL]
                ):
                    different_sequence_signatures = (
                        self.data_signature[TEXT][SEQUENCE]
                        != self.data_signature[LABEL][SEQUENCE]
                    )

                if different_sentence_signatures or different_sequence_signatures:
                    raise ValueError(
                        "If hidden layer weights are shared, data signatures "
                        "for text_features and label_features must coincide."
                    )

        if self.config[ENTITY_RECOGNITION] and (
            ENTITIES not in self.data_signature
            or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]
        ):
            logger.debug(
                f"You specified '{self.__class__.__name__}' to train entities, but "
                f"no entities are present in the training data. Skipping training of "
                f"entities."
            )
            self.config[ENTITY_RECOGNITION] = False
def _create_metrics(self) -> None:
# self.metrics will have the same order as they are created
# so create loss metrics first to output losses first
self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
self.intent_loss = tf.keras.metrics.Mean(name="i_loss")
self.entity_loss = tf.keras.metrics.Mean(name="e_loss")
self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss")
self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss")
# create accuracy metrics second to output accuracies second
self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
self.intent_acc = tf.keras.metrics.Mean(name="i_acc")
self.entity_f1 = tf.keras.metrics.Mean(name="e_f1")
self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1")
self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1")
    def _update_metrics_to_log(self) -> None:
        """Selects which metrics to report, based on the active tasks.

        Loss metrics are only reported when the 'rasa' logger is at DEBUG
        level; accuracy/F1 metrics are always reported for enabled tasks.
        """
        debug_log_level = logging.getLogger("rasa").level == logging.DEBUG

        if self.config[MASKED_LM]:
            self.metrics_to_log.append("m_acc")
            if debug_log_level:
                self.metrics_to_log.append("m_loss")
        if self.config[INTENT_CLASSIFICATION]:
            self.metrics_to_log.append("i_acc")
            if debug_log_level:
                self.metrics_to_log.append("i_loss")
        if self.config[ENTITY_RECOGNITION]:
            for tag_spec in self._entity_tag_specs:
                if tag_spec.num_tags != 0:
                    # Metric names abbreviate the tag name to its first letter,
                    # e.g. "entity" -> "e_f1" (see _log_metric_info legend).
                    name = tag_spec.tag_name
                    self.metrics_to_log.append(f"{name[0]}_f1")
                    if debug_log_level:
                        self.metrics_to_log.append(f"{name[0]}_loss")

        self._log_metric_info()
def _log_metric_info(self) -> None:
metric_name = {
"t": "total",
"i": "intent",
"e": "entity",
"m": "mask",
"r": "role",
"g": "group",
}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
    def _prepare_layers(self) -> None:
        """Creates all tf layers required by the enabled tasks."""
        # For user text, prepare layers that combine different feature types, embed
        # everything using a transformer and optionally also do masked language
        # modeling.
        self.text_name = TEXT
        self._tf_layers[
            f"sequence_layer.{self.text_name}"
        ] = rasa_layers.RasaSequenceLayer(
            self.text_name, self.data_signature[self.text_name], self.config
        )
        if self.config[MASKED_LM]:
            self._prepare_mask_lm_loss(self.text_name)

        # Intent labels are treated similarly to user text but without the transformer,
        # without masked language modelling, and with no dropout applied to the
        # individual features, only to the overall label embedding after all label
        # features have been combined.
        if self.config[INTENT_CLASSIFICATION]:
            # With shared hidden layers, labels reuse the TEXT layers.
            self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL

            # disable input dropout applied to sparse and dense label features
            label_config = self.config.copy()
            label_config.update(
                {SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}
            )

            self._tf_layers[
                f"feature_combining_layer.{self.label_name}"
            ] = rasa_layers.RasaFeatureCombiningLayer(
                self.label_name, self.label_signature[self.label_name], label_config
            )

            self._prepare_ffnn_layer(
                self.label_name,
                self.config[HIDDEN_LAYERS_SIZES][self.label_name],
                self.config[DROP_RATE],
            )

            self._prepare_label_classification_layers(predictor_attribute=TEXT)

        if self.config[ENTITY_RECOGNITION]:
            self._prepare_entity_recognition_layers()
    def _prepare_mask_lm_loss(self, name: Text) -> None:
        """Creates the embedding and loss layers for masked language modeling."""
        # for embedding predicted tokens at masked positions
        self._prepare_embed_layers(f"{name}_lm_mask")
        # for embedding the true tokens that got masked
        self._prepare_embed_layers(f"{name}_golden_token")

        # mask loss is additional loss
        # set scaling to False, so that it doesn't overpower other losses
        self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False)
    def _create_bow(
        self,
        sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
        sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
        sequence_feature_lengths: tf.Tensor,
        name: Text,
    ) -> tf.Tensor:
        """Combines features into a bag-of-words vector and applies the ffnn."""
        x, _ = self._tf_layers[f"feature_combining_layer.{name}"](
            (sequence_features, sentence_features, sequence_feature_lengths),
            training=self._training,
        )
        # convert to bag-of-words by summing along the sequence dimension
        x = tf.reduce_sum(x, axis=1)

        return self._tf_layers[f"ffnn.{name}"](x, self._training)
    def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
        """Embeds every known label, returning (label ids, label embeddings)."""
        all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]

        sequence_feature_lengths = self._get_sequence_feature_lengths(
            self.tf_label_data, LABEL
        )

        # Labels use the bag-of-words path (no transformer).
        x = self._create_bow(
            self.tf_label_data[LABEL][SEQUENCE],
            self.tf_label_data[LABEL][SENTENCE],
            sequence_feature_lengths,
            self.label_name,
        )
        all_labels_embed = self._tf_layers[f"embed.{LABEL}"](x)

        return all_label_ids, all_labels_embed
    def _mask_loss(
        self,
        outputs: tf.Tensor,
        inputs: tf.Tensor,
        seq_ids: tf.Tensor,
        mlm_mask_boolean: tf.Tensor,
        name: Text,
    ) -> tf.Tensor:
        """Computes the masked-language-model loss over the masked positions."""
        # make sure there is at least one element in the mask
        # (otherwise boolean_mask would yield empty tensors); if the mask is
        # all-False, force position [0, 0, 0] to True instead.
        mlm_mask_boolean = tf.cond(
            tf.reduce_any(mlm_mask_boolean),
            lambda: mlm_mask_boolean,
            lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),
        )

        mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)

        # Pick elements that were masked, throwing away the batch & sequence dimension
        # and effectively switching from shape (batch_size, sequence_length, units) to
        # (num_masked_elements, units).
        outputs = tf.boolean_mask(outputs, mlm_mask_boolean)
        inputs = tf.boolean_mask(inputs, mlm_mask_boolean)
        ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)

        tokens_predicted_embed = self._tf_layers[f"embed.{name}_lm_mask"](outputs)
        tokens_true_embed = self._tf_layers[f"embed.{name}_golden_token"](inputs)

        # To limit the otherwise computationally expensive loss calculation, we
        # constrain the label space in MLM (i.e. token space) to only those tokens that
        # were masked in this batch. Hence the reduced list of token embeddings
        # (tokens_true_embed) and the reduced list of labels (ids) are passed as
        # all_labels_embed and all_labels, respectively. In the future, we could be less
        # restrictive and construct a slightly bigger label space which could include
        # tokens not masked in the current batch too.
        return self._tf_layers[f"loss.{name}_mask"](
            inputs_embed=tokens_predicted_embed,
            labels_embed=tokens_true_embed,
            labels=ids,
            all_labels_embed=tokens_true_embed,
            all_labels=ids,
        )
def _calculate_label_loss(
self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor
) -> tf.Tensor:
all_label_ids, all_labels_embed = self._create_all_labels()
text_embed = self._tf_layers[f"embed.{TEXT}"](text_features)
label_embed = self._tf_layers[f"embed.{LABEL}"](label_features)
return self._tf_layers[f"loss.{LABEL}"](
text_embed, label_embed, label_ids, all_labels_embed, all_label_ids
)
    def batch_loss(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> tf.Tensor:
        """Calculates the loss for the given batch.

        Args:
            batch_in: The batch.

        Returns:
            The loss of the given batch (sum of all enabled task losses).
        """
        tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)

        sequence_feature_lengths = self._get_sequence_feature_lengths(
            tf_batch_data, TEXT
        )

        # Run the text through the full sequence layer (feature combining +
        # transformer, plus MLM masking info when enabled).
        (
            text_transformed,
            text_in,
            mask_combined_sequence_sentence,
            text_seq_ids,
            mlm_mask_boolean_text,
            _,
        ) = self._tf_layers[f"sequence_layer.{self.text_name}"](
            (
                tf_batch_data[TEXT][SEQUENCE],
                tf_batch_data[TEXT][SENTENCE],
                sequence_feature_lengths,
            ),
            training=self._training,
        )

        losses = []

        # Lengths of sequences in case of sentence-level features are always 1, but they
        # can effectively be 0 if sentence-level features aren't present.
        sentence_feature_lengths = self._get_sentence_feature_lengths(
            tf_batch_data, TEXT
        )

        combined_sequence_sentence_feature_lengths = (
            sequence_feature_lengths + sentence_feature_lengths
        )

        if self.config[MASKED_LM]:
            loss, acc = self._mask_loss(
                text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT
            )
            self.mask_loss.update_state(loss)
            self.mask_acc.update_state(acc)
            losses.append(loss)

        if self.config[INTENT_CLASSIFICATION]:
            loss = self._batch_loss_intent(
                combined_sequence_sentence_feature_lengths,
                text_transformed,
                tf_batch_data,
            )
            losses.append(loss)

        if self.config[ENTITY_RECOGNITION]:
            losses += self._batch_loss_entities(
                mask_combined_sequence_sentence,
                sequence_feature_lengths,
                text_transformed,
                tf_batch_data,
            )

        return tf.math.add_n(losses)
    def _batch_loss_intent(
        self,
        combined_sequence_sentence_feature_lengths_text: tf.Tensor,
        text_transformed: tf.Tensor,
        tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
    ) -> tf.Tensor:
        """Computes the intent classification loss for one batch."""
        # get sentence features vector for intent classification
        # (the last real token of each transformed sequence)
        sentence_vector = self._last_token(
            text_transformed, combined_sequence_sentence_feature_lengths_text
        )

        sequence_feature_lengths_label = self._get_sequence_feature_lengths(
            tf_batch_data, LABEL
        )

        label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
        # Labels are encoded via the bag-of-words path.
        label = self._create_bow(
            tf_batch_data[LABEL][SEQUENCE],
            tf_batch_data[LABEL][SENTENCE],
            sequence_feature_lengths_label,
            self.label_name,
        )
        loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)

        self._update_label_metrics(loss, acc)

        return loss
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
self.intent_loss.update_state(loss)
self.intent_acc.update_state(acc)
    def _batch_loss_entities(
        self,
        mask_combined_sequence_sentence: tf.Tensor,
        sequence_feature_lengths: tf.Tensor,
        text_transformed: tf.Tensor,
        tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
    ) -> List[tf.Tensor]:
        """Computes one CRF loss per trained entity tag spec.

        The predicted TYPE tags are fed as extra input into the ROLE and
        GROUP CRFs (tag specs are ordered so TYPE comes first).
        """
        losses = []

        entity_tags = None

        for tag_spec in self._entity_tag_specs:
            if tag_spec.num_tags == 0:
                continue

            tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]
            # add a zero (no entity) for the sentence features to match the shape of
            # inputs
            tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])

            loss, f1, _logits = self._calculate_entity_loss(
                text_transformed,
                tag_ids,
                mask_combined_sequence_sentence,
                sequence_feature_lengths,
                tag_spec.tag_name,
                entity_tags,
            )

            if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:
                # use the entity tags as additional input for the role
                # and group CRF
                entity_tags = tf.one_hot(
                    tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags
                )

            self._update_entity_metrics(loss, f1, tag_spec.tag_name)

            losses.append(loss)

        return losses
def _update_entity_metrics(
self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text
) -> None:
if tag_name == ENTITY_ATTRIBUTE_TYPE:
self.entity_loss.update_state(loss)
self.entity_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
self.entity_group_loss.update_state(loss)
self.entity_group_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_ROLE:
self.entity_role_loss.update_state(loss)
self.entity_role_f1.update_state(f1)
    def prepare_for_predict(self) -> None:
        """Prepares the model for prediction.

        Precomputes the embeddings of all labels once so that every later call
        to `_batch_predict_intents` can reuse `self.all_labels_embed`.
        """
        if self.config[INTENT_CLASSIFICATION]:
            _, self.all_labels_embed = self._create_all_labels()
    # NOTE: `batch_predict` is defined further below, after the private
    # `_batch_predict_*` helpers it delegates to.
    def _batch_predict_entities(
        self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor
    ) -> Dict[Text, tf.Tensor]:
        """Runs the entity CRFs and collects tag ids and confidences per tag kind.

        Returns:
            Mapping with keys ``e_<tag>_ids`` and ``e_<tag>_scores`` for each
            tag spec that was trained.
        """
        predictions: Dict[Text, tf.Tensor] = {}
        # one-hot entity-type predictions, fed into the role/group CRFs below
        entity_tags = None
        for tag_spec in self._entity_tag_specs:
            # skip crf layer if it was not trained
            if tag_spec.num_tags == 0:
                continue
            name = tag_spec.tag_name
            _input = text_transformed
            if entity_tags is not None:
                _tags = self._tf_layers[f"embed.{name}.tags"](entity_tags)
                _input = tf.concat([_input, _tags], axis=-1)
            _logits = self._tf_layers[f"embed.{name}.logits"](_input)
            pred_ids, confidences = self._tf_layers[f"crf.{name}"](
                _logits, sequence_feature_lengths
            )
            predictions[f"e_{name}_ids"] = pred_ids
            predictions[f"e_{name}_scores"] = confidences
            if name == ENTITY_ATTRIBUTE_TYPE:
                # use the entity tags as additional input for the role
                # and group CRF
                entity_tags = tf.one_hot(
                    tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags
                )
        return predictions
    def _batch_predict_intents(
        self,
        combined_sequence_sentence_feature_lengths: tf.Tensor,
        text_transformed: tf.Tensor,
    ) -> Dict[Text, tf.Tensor]:
        """Scores every known label against the batch's sentence vectors.

        Requires `prepare_for_predict` to have been called so that
        `self.all_labels_embed` is populated.

        Returns:
            Mapping with key ``i_scores`` holding the per-label confidences.

        Raises:
            ValueError: If the model was not prepared for prediction.
        """
        if self.all_labels_embed is None:
            raise ValueError(
                "The model was not prepared for prediction. "
                "Call `prepare_for_predict` first."
            )
        # get sentence feature vector for intent classification
        sentence_vector = self._last_token(
            text_transformed, combined_sequence_sentence_feature_lengths
        )
        sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
        # compare the embedded sentence vector with all label embeddings at once
        _, scores = self._tf_layers[
            f"loss.{LABEL}"
        ].get_similarities_and_confidences_from_embeddings(
            sentence_vector_embed[:, tf.newaxis, :],
            self.all_labels_embed[tf.newaxis, :, :],
        )
        return {"i_scores": scores}
    def batch_predict(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> Dict[Text, tf.Tensor]:
        """Predicts the output of the given batch.

        Args:
            batch_in: The batch.

        Returns:
            The output to predict: diagnostic data plus, depending on the
            configuration, intent scores (``i_scores``) and/or entity tag
            predictions (``e_<tag>_ids`` / ``e_<tag>_scores``).
        """
        tf_batch_data = self.batch_to_model_data_format(
            batch_in, self.predict_data_signature
        )
        sequence_feature_lengths = self._get_sequence_feature_lengths(
            tf_batch_data, TEXT
        )
        sentence_feature_lengths = self._get_sentence_feature_lengths(
            tf_batch_data, TEXT
        )
        # run the shared sequence/transformer stack over the text features
        text_transformed, _, _, _, _, attention_weights = self._tf_layers[
            f"sequence_layer.{self.text_name}"
        ](
            (
                tf_batch_data[TEXT][SEQUENCE],
                tf_batch_data[TEXT][SENTENCE],
                sequence_feature_lengths,
            ),
            training=self._training,
        )
        # expose intermediate results for diagnostics
        predictions = {
            DIAGNOSTIC_DATA: {
                "attention_weights": attention_weights,
                "text_transformed": text_transformed,
            }
        }
        if self.config[INTENT_CLASSIFICATION]:
            predictions.update(
                self._batch_predict_intents(
                    sequence_feature_lengths + sentence_feature_lengths,
                    text_transformed,
                )
            )
        if self.config[ENTITY_RECOGNITION]:
            predictions.update(
                self._batch_predict_entities(sequence_feature_lengths, text_transformed)
            )
        return predictions
from __future__ import annotations
import copy
import logging
from collections import defaultdict
from pathlib import Path
from rasa.nlu.featurizers.featurizer import Featurizer
import numpy as np
import scipy.sparse
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.nlu.classifiers.classifier import IntentClassifier
import rasa.shared.utils.io
import rasa.utils.io as io_utils
import rasa.nlu.utils.bilou_utils as bilou_utils
from rasa.shared.constants import DIAGNOSTIC_DATA
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.utils import train_utils
from rasa.utils.tensorflow import rasa_layers
from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
from rasa.utils.tensorflow.model_data import (
RasaModelData,
FeatureSignature,
FeatureArray,
)
from rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
TEXT,
INTENT,
INTENT_RESPONSE_KEY,
ENTITIES,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
NO_ENTITY_TAG,
SPLIT_ENTITIES_BY_COMMA,
)
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.tensorflow.constants import (
LABEL,
IDS,
HIDDEN_LAYERS_SIZES,
RENORMALIZE_CONFIDENCES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
CONNECTION_DENSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
AUTO,
BALANCED,
CROSS_ENTROPY,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
CHECKPOINT_MODEL,
SEQUENCE,
SENTENCE,
SEQUENCE_LENGTH,
DENSE_DIMENSION,
MASK,
CONSTRAIN_SIMILARITIES,
MODEL_CONFIDENCE,
SOFTMAX,
)
logger = logging.getLogger(__name__)
# key prefixes distinguishing sparse vs. dense features in the feature dicts
SPARSE = "sparse"
DENSE = "dense"
# keys under which the numeric intent label ids live in the model data
LABEL_KEY = LABEL
LABEL_SUB_KEY = IDS
# entity tag kinds DIET can predict; TYPE is listed first because its tags are
# fed as additional input to the role/group CRFs (see `_batch_loss_entities`)
POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
@DefaultV1Recipe.register(
[
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
],
is_trainable=True,
)
class DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):
"""A multi-task model for intent classification and entity extraction.
DIET is Dual Intent and Entity Transformer.
The architecture is based on a transformer which is shared for both tasks.
A sequence of entity labels is predicted through a Conditional Random Field (CRF)
tagging layer on top of the transformer output sequence corresponding to the
input sequence of tokens. The transformer output for the ``__CLS__`` token and
intent labels are embedded into a single semantic vector space. We use the
dot-product loss to maximize the similarity with the target label and minimize
similarities with negative samples.
"""
    @classmethod
    def required_components(cls) -> List[Type]:
        """Components that should be included in the pipeline before this component."""
        # DIET consumes precomputed message features, so a featurizer must run first
        return [Featurizer]
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        """The component's default config (see parent class for full docstring).

        Keys are constants from ``rasa.utils.tensorflow.constants``; values are
        the documented defaults for DIET.
        """
        # please make sure to update the docs when changing a default parameter
        return {
            # ## Architecture of the used neural network
            # Hidden layer sizes for layers before the embedding layers for user message
            # and labels.
            # The number of hidden layers is equal to the length of the corresponding
            # list.
            HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},
            # Whether to share the hidden layer weights between user message and labels.
            SHARE_HIDDEN_LAYERS: False,
            # Number of units in transformer
            TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,
            # Number of transformer layers
            NUM_TRANSFORMER_LAYERS: 2,
            # Number of attention heads in transformer
            NUM_HEADS: 4,
            # If 'True' use key relative embeddings in attention
            KEY_RELATIVE_ATTENTION: False,
            # If 'True' use value relative embeddings in attention
            VALUE_RELATIVE_ATTENTION: False,
            # Max position for relative embeddings. Only in effect if key- or value
            # relative attention are turned on
            MAX_RELATIVE_POSITION: 5,
            # Use a unidirectional or bidirectional encoder.
            UNIDIRECTIONAL_ENCODER: False,
            # ## Training parameters
            # Initial and final batch sizes:
            # Batch size will be linearly increased for each epoch.
            BATCH_SIZES: [64, 256],
            # Strategy used when creating batches.
            # Can be either 'sequence' or 'balanced'.
            BATCH_STRATEGY: BALANCED,
            # Number of epochs to train
            EPOCHS: 300,
            # Set random seed to any 'int' to get reproducible results
            RANDOM_SEED: None,
            # Initial learning rate for the optimizer
            LEARNING_RATE: 0.001,
            # ## Parameters for embeddings
            # Dimension size of embedding vectors
            EMBEDDING_DIMENSION: 20,
            # Dense dimension to use for sparse features.
            DENSE_DIMENSION: {TEXT: 128, LABEL: 20},
            # Default dimension to use for concatenating sequence and sentence features.
            CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},
            # The number of incorrect labels. The algorithm will minimize
            # their similarity to the user input during training.
            NUM_NEG: 20,
            # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
            SIMILARITY_TYPE: AUTO,
            # The type of the loss function, either 'cross_entropy' or 'margin'.
            LOSS_TYPE: CROSS_ENTROPY,
            # Number of top intents for which confidences should be reported.
            # Set to 0 if confidences for all intents should be reported.
            RANKING_LENGTH: LABEL_RANKING_LENGTH,
            # Indicates how similar the algorithm should try to make embedding vectors
            # for correct labels.
            # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
            MAX_POS_SIM: 0.8,
            # Maximum negative similarity for incorrect labels.
            # Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
            MAX_NEG_SIM: -0.4,
            # If 'True' the algorithm only minimizes maximum similarity over
            # incorrect intent labels, used only if 'loss_type' is set to 'margin'.
            USE_MAX_NEG_SIM: True,
            # If 'True' scale loss inverse proportionally to the confidence
            # of the correct prediction
            SCALE_LOSS: False,
            # ## Regularization parameters
            # The scale of regularization
            REGULARIZATION_CONSTANT: 0.002,
            # The scale of how important is to minimize the maximum similarity
            # between embeddings of different labels,
            # used only if 'loss_type' is set to 'margin'.
            NEGATIVE_MARGIN_SCALE: 0.8,
            # Dropout rate for encoder
            DROP_RATE: 0.2,
            # Dropout rate for attention
            DROP_RATE_ATTENTION: 0,
            # Fraction of trainable weights in internal layers.
            CONNECTION_DENSITY: 0.2,
            # If 'True' apply dropout to sparse input tensors
            SPARSE_INPUT_DROPOUT: True,
            # If 'True' apply dropout to dense input tensors
            DENSE_INPUT_DROPOUT: True,
            # ## Evaluation parameters
            # How often calculate validation accuracy.
            # Small values may hurt performance.
            EVAL_NUM_EPOCHS: 20,
            # How many examples to use for hold out validation set
            # Large values may hurt performance, e.g. model accuracy.
            # Set to 0 for no validation.
            EVAL_NUM_EXAMPLES: 0,
            # ## Model config
            # If 'True' intent classification is trained and intent predicted.
            INTENT_CLASSIFICATION: True,
            # If 'True' named entity recognition is trained and entities predicted.
            ENTITY_RECOGNITION: True,
            # If 'True' random tokens of the input message will be masked and the model
            # should predict those tokens.
            MASKED_LM: False,
            # 'BILOU_flag' determines whether to use BILOU tagging or not.
            # If set to 'True' labelling is more rigorous, however more
            # examples per entity are required.
            # Rule of thumb: you should have more than 100 examples per entity.
            BILOU_FLAG: True,
            # If you want to use tensorboard to visualize training and validation
            # metrics, set this option to a valid output directory.
            TENSORBOARD_LOG_DIR: None,
            # Define when training metrics for tensorboard should be logged.
            # Either after every epoch or for every training step.
            # Valid values: 'epoch' and 'batch'
            TENSORBOARD_LOG_LEVEL: "epoch",
            # Perform model checkpointing
            CHECKPOINT_MODEL: False,
            # Specify what features to use as sequence and sentence features
            # By default all features in the pipeline are used.
            FEATURIZERS: [],
            # Split entities by comma, this makes sense e.g. for a list of ingredients
            # in a recipie, but it doesn't make sense for the parts of an address
            SPLIT_ENTITIES_BY_COMMA: True,
            # If 'True' applies sigmoid on all similarity terms and adds
            # it to the loss function to ensure that similarity values are
            # approximately bounded. Used inside cross-entropy loss only.
            CONSTRAIN_SIMILARITIES: False,
            # Model confidence to be returned during inference. Currently, the only
            # possible value is `softmax`.
            MODEL_CONFIDENCE: SOFTMAX,
            # Determines whether the confidences of the chosen top intents should be
            # renormalized so that they sum up to 1. By default, we do not renormalize
            # and return the confidences for the top intents as is.
            # Note that renormalization only makes sense if confidences are generated
            # via `softmax`.
            RENORMALIZE_CONFIDENCES: False,
        }
    def __init__(
        self,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        index_label_id_mapping: Optional[Dict[int, Text]] = None,
        entity_tag_specs: Optional[List[EntityTagSpec]] = None,
        model: Optional[RasaModel] = None,
        sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,
    ) -> None:
        """Declare instance variables with default values.

        Args:
            config: The component configuration.
            model_storage: Storage the component can persist itself to.
            resource: Resource locating this component inside the storage.
            execution_context: Information about the current graph run.
            index_label_id_mapping: Maps numeric label indices to label names
                (supplied when loading a trained component).
            entity_tag_specs: Entity tag specifications from a previous training.
            model: An already instantiated model (loading / finetuning).
            sparse_feature_sizes: Sparse feature sizes from a previous training.
        """
        if EPOCHS not in config:
            rasa.shared.utils.io.raise_warning(
                f"Please configure the number of '{EPOCHS}' in your configuration file."
                f" We will change the default value of '{EPOCHS}' in the future to 1. "
            )
        self.component_config = config
        self._model_storage = model_storage
        self._resource = resource
        self._execution_context = execution_context
        # validates and normalizes `self.component_config` in place
        self._check_config_parameters()
        # transform numbers to labels
        self.index_label_id_mapping = index_label_id_mapping or {}
        self._entity_tag_specs = entity_tag_specs
        self.model = model
        # directory for intermediate checkpoints; only set when checkpointing is on
        self.tmp_checkpoint_dir: Optional[Path] = None
        if self.component_config[CHECKPOINT_MODEL]:
            self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
        self._label_data: Optional[RasaModelData] = None
        self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None
        self.split_entities_config = rasa.utils.train_utils.init_split_entities(
            self.component_config[SPLIT_ENTITIES_BY_COMMA],
            SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
        )
        self.finetune_mode = self._execution_context.is_finetuning
        self._sparse_feature_sizes = sparse_feature_sizes
    # init helpers
def _check_masked_lm(self) -> None:
if (
self.component_config[MASKED_LM]
and self.component_config[NUM_TRANSFORMER_LAYERS] == 0
):
raise ValueError(
f"If number of transformer layers is 0, "
f"'{MASKED_LM}' option should be 'False'."
)
def _check_share_hidden_layers_sizes(self) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
first_hidden_layer_sizes = next(
iter(self.component_config[HIDDEN_LAYERS_SIZES].values())
)
# check that all hidden layer sizes are the same
identical_hidden_layer_sizes = all(
current_hidden_layer_sizes == first_hidden_layer_sizes
for current_hidden_layer_sizes in self.component_config[
HIDDEN_LAYERS_SIZES
].values()
)
if not identical_hidden_layer_sizes:
raise ValueError(
f"If hidden layer weights are shared, "
f"{HIDDEN_LAYERS_SIZES} must coincide."
)
    def _check_config_parameters(self) -> None:
        """Validates and normalizes the component configuration in place.

        The step order matters: deprecated options are rewritten first, then
        sanity checks run, then derived settings are filled in.
        """
        self.component_config = train_utils.check_deprecated_options(
            self.component_config
        )
        self._check_masked_lm()
        self._check_share_hidden_layers_sizes()
        self.component_config = train_utils.update_confidence_type(
            self.component_config
        )
        train_utils.validate_configuration_settings(self.component_config)
        self.component_config = train_utils.update_similarity_type(
            self.component_config
        )
        self.component_config = train_utils.update_evaluation_parameters(
            self.component_config
        )
    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> DIETClassifier:
        """Creates a new untrained component (see parent class for full docstring)."""
        # all setup (config validation, defaults) happens inside __init__
        return cls(config, model_storage, resource, execution_context)
@property
def label_key(self) -> Optional[Text]:
"""Return key if intent classification is activated."""
return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@property
def label_sub_key(self) -> Optional[Text]:
"""Return sub key if intent classification is activated."""
return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None
    @staticmethod
    def model_class() -> Type[RasaModel]:
        """Returns the TensorFlow model class this component trains (``DIET``)."""
        return DIET
    # training data helpers:
@staticmethod
def _label_id_index_mapping(
training_data: TrainingData, attribute: Text
) -> Dict[Text, int]:
"""Create label_id dictionary."""
distinct_label_ids = {
example.get(attribute) for example in training_data.intent_examples
} - {None}
return {
label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))
}
@staticmethod
def _invert_mapping(mapping: Dict) -> Dict:
return {value: key for key, value in mapping.items()}
    def _create_entity_tag_specs(
        self, training_data: TrainingData
    ) -> List[EntityTagSpec]:
        """Create entity tag specifications with their respective tag id mappings.

        Iterates `POSSIBLE_TAGS` in order; tag kinds without any tags in the
        training data are omitted from the result.
        """
        _tag_specs = []
        for tag_name in POSSIBLE_TAGS:
            if self.component_config[BILOU_FLAG]:
                # BILOU tagging multiplies each tag into B-/I-/L-/U- variants
                tag_id_index_mapping = bilou_utils.build_tag_id_dict(
                    training_data, tag_name
                )
            else:
                tag_id_index_mapping = self._tag_id_index_mapping_for(
                    tag_name, training_data
                )
            if tag_id_index_mapping:
                _tag_specs.append(
                    EntityTagSpec(
                        tag_name=tag_name,
                        tags_to_ids=tag_id_index_mapping,
                        ids_to_tags=self._invert_mapping(tag_id_index_mapping),
                        num_tags=len(tag_id_index_mapping),
                    )
                )
        return _tag_specs
@staticmethod
def _tag_id_index_mapping_for(
tag_name: Text, training_data: TrainingData
) -> Optional[Dict[Text, int]]:
"""Create mapping from tag name to id."""
if tag_name == ENTITY_ATTRIBUTE_ROLE:
distinct_tags = training_data.entity_roles
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}
if not distinct_tags:
return None
tag_id_dict = {
tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)
}
# NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index
# needed for correct prediction for padding
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
@staticmethod
def _find_example_for_label(
label: Text, examples: List[Message], attribute: Text
) -> Optional[Message]:
for ex in examples:
if ex.get(attribute) == label:
return ex
return None
def _check_labels_features_exist(
self, labels_example: List[Message], attribute: Text
) -> bool:
"""Checks if all labels have features set."""
return all(
label_example.features_present(
attribute, self.component_config[FEATURIZERS]
)
for label_example in labels_example
)
def _extract_features(
self, message: Message, attribute: Text
) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:
(
sparse_sequence_features,
sparse_sentence_features,
) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])
dense_sequence_features, dense_sentence_features = message.get_dense_features(
attribute, self.component_config[FEATURIZERS]
)
if dense_sequence_features is not None and sparse_sequence_features is not None:
if (
dense_sequence_features.features.shape[0]
!= sparse_sequence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sequence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
if dense_sentence_features is not None and sparse_sentence_features is not None:
if (
dense_sentence_features.features.shape[0]
!= sparse_sentence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sentence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
# If we don't use the transformer and we don't want to do entity recognition,
# to speed up training take only the sentence features as feature vector.
# We would not make use of the sequence anyway in this setup. Carrying over
# those features to the actual training process takes quite some time.
if (
self.component_config[NUM_TRANSFORMER_LAYERS] == 0
and not self.component_config[ENTITY_RECOGNITION]
and attribute not in [INTENT, INTENT_RESPONSE_KEY]
):
sparse_sequence_features = None
dense_sequence_features = None
out = {}
if sparse_sentence_features is not None:
out[f"{SPARSE}_{SENTENCE}"] = sparse_sentence_features.features
if sparse_sequence_features is not None:
out[f"{SPARSE}_{SEQUENCE}"] = sparse_sequence_features.features
if dense_sentence_features is not None:
out[f"{DENSE}_{SENTENCE}"] = dense_sentence_features.features
if dense_sequence_features is not None:
out[f"{DENSE}_{SEQUENCE}"] = dense_sequence_features.features
return out
    def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
        """Checks if features have same dimensionality if hidden layers are shared."""
        if self.component_config.get(SHARE_HIDDEN_LAYERS):
            num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
            num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
            num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
            num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
            # chained comparison: true iff both counts are non-zero AND they differ
            if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (
                0 < num_text_sequence_features != num_label_sequence_features > 0
            ):
                raise ValueError(
                    "If embeddings are shared text features and label features "
                    "must coincide. Check the output dimensions of previous components."
                )
    def _extract_labels_precomputed_features(
        self, label_examples: List[Message], attribute: Text = INTENT
    ) -> Tuple[List[FeatureArray], List[FeatureArray]]:
        """Collects precomputed encodings.

        Returns:
            A tuple of (sequence feature arrays, sentence feature arrays)
            aggregated over all given label examples.
        """
        # group each feature kind across all label examples
        features = defaultdict(list)
        for e in label_examples:
            label_features = self._extract_features(e, attribute)
            for feature_key, feature_value in label_features.items():
                features[feature_key].append(feature_value)
        sequence_features = []
        sentence_features = []
        for feature_name, feature_value in features.items():
            # keys look like "<sparse|dense>_<sequence|sentence>"
            if SEQUENCE in feature_name:
                sequence_features.append(
                    FeatureArray(np.array(feature_value), number_of_dimensions=3)
                )
            else:
                sentence_features.append(
                    FeatureArray(np.array(feature_value), number_of_dimensions=3)
                )
        return sequence_features, sentence_features
@staticmethod
def _compute_default_label_features(
labels_example: List[Message],
) -> List[FeatureArray]:
"""Computes one-hot representation for the labels."""
logger.debug("No label features found. Computing default label features.")
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
# add sequence dimension to one-hot labels
return [
FeatureArray(
np.array([np.expand_dims(a, 0) for a in eye_matrix]),
number_of_dimensions=3,
)
]
    def _create_label_data(
        self,
        training_data: TrainingData,
        label_id_dict: Dict[Text, int],
        attribute: Text,
    ) -> RasaModelData:
        """Create matrix with label_ids encoded in rows as bag of words.

        Find a training example for each label and get the encoded features
        from the corresponding Message object.
        If the features are already computed, fetch them from the message object
        else compute a one hot encoding for the label as the feature vector.

        Returns:
            A `RasaModelData` holding one feature row per label, ordered by
            label index.
        """
        # Collect one example for each label
        labels_idx_examples = []
        for label_name, idx in label_id_dict.items():
            label_example = self._find_example_for_label(
                label_name, training_data.intent_examples, attribute
            )
            labels_idx_examples.append((idx, label_example))
        # Sort the list of tuples based on label_idx
        labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])
        labels_example = [example for (_, example) in labels_idx_examples]
        # Collect features, precomputed if they exist, else compute on the fly
        if self._check_labels_features_exist(labels_example, attribute):
            (
                sequence_features,
                sentence_features,
            ) = self._extract_labels_precomputed_features(labels_example, attribute)
        else:
            sequence_features = None
            sentence_features = self._compute_default_label_features(labels_example)
        label_data = RasaModelData()
        label_data.add_features(LABEL, SEQUENCE, sequence_features)
        label_data.add_features(LABEL, SENTENCE, sentence_features)
        if label_data.does_feature_not_exist(
            LABEL, SENTENCE
        ) and label_data.does_feature_not_exist(LABEL, SEQUENCE):
            raise ValueError(
                "No label features are present. Please check your configuration file."
            )
        label_ids = np.array([idx for (idx, _) in labels_idx_examples])
        # explicitly add last dimension to label_ids
        # to track correctly dynamic sequences
        label_data.add_features(
            LABEL_KEY,
            LABEL_SUB_KEY,
            [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
        )
        label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
        return label_data
    def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:
        """Looks up the precomputed default sentence features for the given label ids.

        Relies on `self._label_data` having been populated by
        `_create_label_data` beforehand.
        """
        feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)
        all_label_features = feature_arrays[0]
        return [
            FeatureArray(
                np.array([all_label_features[label_id] for label_id in label_ids]),
                number_of_dimensions=all_label_features.number_of_dimensions,
            )
        ]
    def _create_model_data(
        self,
        training_data: List[Message],
        label_id_dict: Optional[Dict[Text, int]] = None,
        label_attribute: Optional[Text] = None,
        training: bool = True,
    ) -> RasaModelData:
        """Prepare data for training and create a RasaModelData object.

        Args:
            training_data: The messages to featurize.
            label_id_dict: Maps label names to numeric ids (training only).
            label_attribute: The attribute that carries the label (e.g. intent).
            training: Whether this data is for training (adds label/entity
                targets) or for prediction (text features only).
        """
        from rasa.utils.tensorflow import model_data_utils
        attributes_to_consider = [TEXT]
        if training and self.component_config[INTENT_CLASSIFICATION]:
            # we don't have any intent labels during prediction, just add them during
            # training
            attributes_to_consider.append(label_attribute)
        if (
            training
            and self.component_config[ENTITY_RECOGNITION]
            and self._entity_tag_specs
        ):
            # Add entities as labels only during training and only if there was
            # training data added for entities with DIET configured to predict entities.
            attributes_to_consider.append(ENTITIES)
        if training and label_attribute is not None:
            # only use those training examples that have the label_attribute set
            # during training
            training_data = [
                example for example in training_data if label_attribute in example.data
            ]
        # drop messages without any usable text features
        training_data = [
            message
            for message in training_data
            if message.features_present(
                attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)
            )
        ]
        if not training_data:
            # no training data are present to train
            return RasaModelData()
        (
            features_for_examples,
            sparse_feature_sizes,
        ) = model_data_utils.featurize_training_examples(
            training_data,
            attributes_to_consider,
            entity_tag_specs=self._entity_tag_specs,
            featurizers=self.component_config[FEATURIZERS],
            bilou_tagging=self.component_config[BILOU_FLAG],
        )
        attribute_data, _ = model_data_utils.convert_to_data_format(
            features_for_examples, consider_dialogue_dimension=False
        )
        model_data = RasaModelData(
            label_key=self.label_key, label_sub_key=self.label_sub_key
        )
        model_data.add_data(attribute_data)
        model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
        # Current implementation doesn't yet account for updating sparse
        # feature sizes of label attributes. That's why we remove them.
        sparse_feature_sizes = self._remove_label_sparse_feature_sizes(
            sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute
        )
        model_data.add_sparse_feature_sizes(sparse_feature_sizes)
        self._add_label_features(
            model_data, training_data, label_attribute, label_id_dict, training
        )
        # make sure all keys are in the same order during training and prediction
        # as we rely on the order of key and sub-key when constructing the actual
        # tensors from the model data
        model_data.sort()
        return model_data
@staticmethod
def _remove_label_sparse_feature_sizes(
sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
label_attribute: Optional[Text] = None,
) -> Dict[Text, Dict[Text, List[int]]]:
if label_attribute in sparse_feature_sizes:
del sparse_feature_sizes[label_attribute]
return sparse_feature_sizes
    def _add_label_features(
        self,
        model_data: RasaModelData,
        training_data: List[Message],
        label_attribute: Optional[Text],
        label_id_dict: Optional[Dict[Text, int]],
        training: bool = True,
    ) -> None:
        """Adds label ids and label features to ``model_data`` in place.

        When the label attribute itself carries no features, default one-hot
        features from ``self._label_data`` are used. The features are copied
        under the generic LABEL key so the model can access them uniformly.
        """
        label_ids = []
        if training and self.component_config[INTENT_CLASSIFICATION]:
            for example in training_data:
                if example.get(label_attribute):
                    label_ids.append(label_id_dict[example.get(label_attribute)])
            # explicitly add last dimension to label_ids
            # to track correctly dynamic sequences
            model_data.add_features(
                LABEL_KEY,
                LABEL_SUB_KEY,
                [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
            )
        if (
            label_attribute
            and model_data.does_feature_not_exist(label_attribute, SENTENCE)
            and model_data.does_feature_not_exist(label_attribute, SEQUENCE)
        ):
            # no label features are present, get default features from _label_data
            model_data.add_features(
                LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))
            )
        # as label_attribute can have different values, e.g. INTENT or RESPONSE,
        # copy over the features to the LABEL key to make
        # it easier to access the label features inside the model itself
        model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)
        model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)
        model_data.update_key(label_attribute, MASK, LABEL, MASK)
        model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
# train helpers
    def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
        """Prepares data for training.

        Performs sanity checks on training data, extracts encodings for labels.

        Returns:
            The model data ready for training; empty if no labels are present.
        """
        if self.component_config[BILOU_FLAG]:
            # rewrites entity annotations into BILOU-tagged form in place
            bilou_utils.apply_bilou_schema(training_data)
        label_id_index_mapping = self._label_id_index_mapping(
            training_data, attribute=INTENT
        )
        if not label_id_index_mapping:
            # no labels are present to train
            return RasaModelData()
        self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
        self._label_data = self._create_label_data(
            training_data, label_id_index_mapping, attribute=INTENT
        )
        self._entity_tag_specs = self._create_entity_tag_specs(training_data)
        label_attribute = (
            INTENT if self.component_config[INTENT_CLASSIFICATION] else None
        )
        model_data = self._create_model_data(
            training_data.nlu_examples,
            label_id_index_mapping,
            label_attribute=label_attribute,
        )
        self._check_input_dimension_consistency(model_data)
        return model_data
@staticmethod
def _check_enough_labels(model_data: RasaModelData) -> bool:
return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2
    def train(self, training_data: TrainingData) -> Resource:
        """Train the embedding intent classifier on a data set.

        Skips training (returning the resource unchanged) when there is no
        usable data or fewer than two intent classes.

        Returns:
            The resource under which the trained model is persisted.
        """
        model_data = self.preprocess_train_data(training_data)
        if model_data.is_empty():
            logger.debug(
                f"Cannot train '{self.__class__.__name__}'. No data was provided. "
                f"Skipping training of the classifier."
            )
            return self._resource
        if not self.model and self.finetune_mode:
            raise rasa.shared.exceptions.InvalidParameterException(
                f"{self.__class__.__name__} was instantiated "
                f"with `model=None` and `finetune_mode=True`. "
                f"This is not a valid combination as the component "
                f"needs an already instantiated and trained model "
                f"to continue training in finetune mode."
            )
        if self.component_config.get(INTENT_CLASSIFICATION):
            if not self._check_enough_labels(model_data):
                logger.error(
                    f"Cannot train '{self.__class__.__name__}'. "
                    f"Need at least 2 different intent classes. "
                    f"Skipping training of classifier."
                )
                return self._resource
        if self.component_config.get(ENTITY_RECOGNITION):
            self.check_correct_entity_annotations(training_data)
        # keep one example for persisting and loading
        self._data_example = model_data.first_data_example()
        if not self.finetune_mode:
            # No pre-trained model to load from. Create a new instance of the model.
            self.model = self._instantiate_model_class(model_data)
            self.model.compile(
                optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])
            )
        else:
            # adapt sparse layers to any new vocabulary before continuing training
            self.model.adjust_for_incremental_training(
                data_example=self._data_example,
                new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),
                old_sparse_feature_sizes=self._sparse_feature_sizes,
            )
        self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
        data_generator, validation_data_generator = train_utils.create_data_generators(
            model_data,
            self.component_config[BATCH_SIZES],
            self.component_config[EPOCHS],
            self.component_config[BATCH_STRATEGY],
            self.component_config[EVAL_NUM_EXAMPLES],
            self.component_config[RANDOM_SEED],
        )
        callbacks = train_utils.create_common_callbacks(
            self.component_config[EPOCHS],
            self.component_config[TENSORBOARD_LOG_DIR],
            self.component_config[TENSORBOARD_LOG_LEVEL],
            self.tmp_checkpoint_dir,
        )
        self.model.fit(
            data_generator,
            epochs=self.component_config[EPOCHS],
            validation_data=validation_data_generator,
            validation_freq=self.component_config[EVAL_NUM_EPOCHS],
            callbacks=callbacks,
            verbose=False,
            shuffle=False,  # we use custom shuffle inside data generator
        )
        self.persist()
        return self._resource
# process helpers
def _predict(
    self, message: Message
) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:
    """Run inference for a single message and return the raw model outputs.

    Returns ``None`` when no trained model exists or the message produces
    no featurized data.
    """
    if self.model is None:
        logger.debug(
            f"There is no trained model for '{self.__class__.__name__}': The "
            f"component is either not trained or didn't receive enough training "
            f"data."
        )
        return None
    # Featurize the message as a batch of size one.
    batch = self._create_model_data([message], training=False)
    if batch.is_empty():
        return None
    return self.model.run_inference(batch)
def _predict_label(
    self, predict_out: Optional[Dict[Text, tf.Tensor]]
) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:
    """Predicts the intent of the provided message."""
    empty_label: Dict[Text, Any] = {"name": None, "confidence": 0.0}
    if predict_out is None:
        return empty_label, []
    # Similarities come back as a matrix; flatten to one score per label.
    similarities = predict_out["i_scores"].flatten()
    if similarities.size == 0:
        # all-zero input -> do not predict any label
        return empty_label, []
    # Rank confidences, optionally renormalizing softmax scores over the
    # top `ranking_length` candidates.
    renormalize = (
        self.component_config[RENORMALIZE_CONFIDENCES]
        and self.component_config[MODEL_CONFIDENCE] == SOFTMAX
    )
    ranked_indices, similarities = train_utils.rank_and_mask(
        similarities,
        ranking_length=self.component_config[RANKING_LENGTH],
        renormalize=renormalize,
    )
    scores: List[float] = similarities.tolist()  # np.float -> float
    best_index = ranked_indices[0]
    label = {
        "name": self.index_label_id_mapping[best_index],
        "confidence": scores[best_index],
    }
    label_ranking = [
        {"name": self.index_label_id_mapping[idx], "confidence": scores[idx]}
        for idx in ranked_indices
    ]
    return label, label_ranking
def _predict_entities(
    self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message
) -> List[Dict]:
    """Decode entity tag predictions for one message into entity dicts."""
    if predict_out is None:
        return []
    tags, tag_confidences = train_utils.entity_label_to_tags(
        predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]
    )
    decoded = self.convert_predictions_into_entities(
        message.get(TEXT),
        message.get(TOKENS_NAMES[TEXT], []),
        tags,
        self.split_entities_config,
        tag_confidences,
    )
    decoded = self.add_extractor_name(decoded)
    # Keep entities found by earlier pipeline components and append ours.
    return message.get(ENTITIES, []) + decoded
def process(self, messages: List[Message]) -> List[Message]:
    """Augments the message with intents, entities, and diagnostic data."""
    for message in messages:
        prediction = self._predict(message)
        if self.component_config[INTENT_CLASSIFICATION]:
            label, ranking = self._predict_label(prediction)
            message.set(INTENT, label, add_to_output=True)
            message.set("intent_ranking", ranking, add_to_output=True)
        if self.component_config[ENTITY_RECOGNITION]:
            message.set(
                ENTITIES,
                self._predict_entities(prediction, message),
                add_to_output=True,
            )
        if prediction and self._execution_context.should_add_diagnostic_data:
            message.add_diagnostic_data(
                self._execution_context.node_name, prediction.get(DIAGNOSTIC_DATA)
            )
    return messages
def persist(self) -> None:
    """Persist this model into the passed directory."""
    if self.model is None:
        return None
    with self._model_storage.write_to(self._resource) as model_path:
        file_name = self.__class__.__name__
        tf_model_file = model_path / f"{file_name}.tf_model"
        rasa.shared.utils.io.create_directory_for_file(tf_model_file)
        if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:
            # Restore the best checkpointed weights before saving.
            self.model.load_weights(self.tmp_checkpoint_dir / "checkpoint.tf_model")
            # Save an empty file to flag that this model has been
            # produced using checkpointing
            (model_path / f"{file_name}.from_checkpoint.pkl").touch()
        self.model.save(str(tf_model_file))
        # Persist everything needed to reconstruct the component on load.
        io_utils.pickle_dump(
            model_path / f"{file_name}.data_example.pkl", self._data_example
        )
        io_utils.pickle_dump(
            model_path / f"{file_name}.sparse_feature_sizes.pkl",
            self._sparse_feature_sizes,
        )
        io_utils.pickle_dump(
            model_path / f"{file_name}.label_data.pkl", dict(self._label_data.data)
        )
        io_utils.json_pickle(
            model_path / f"{file_name}.index_label_id_mapping.json",
            self.index_label_id_mapping,
        )
        tag_spec_dicts = (
            [tag_spec._asdict() for tag_spec in self._entity_tag_specs]
            if self._entity_tag_specs
            else []
        )
        rasa.shared.utils.io.dump_obj_as_json_to_file(
            model_path / f"{file_name}.entity_tag_specs.json", tag_spec_dicts
        )
@classmethod
def load(
    cls,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    **kwargs: Any,
) -> DIETClassifier:
    """Loads a policy from the storage (see parent class for full docstring).

    Falls back to an untrained instance when the resource does not exist.
    """
    try:
        with model_storage.read_from(resource) as model_path:
            return cls._load(
                model_path, config, model_storage, resource, execution_context
            )
    except ValueError:
        # BUGFIX: `cls` is already the class inside a classmethod, so the
        # original `cls.__class__.__name__` logged the metaclass name
        # (e.g. "ABCMeta") instead of the component's class name.
        logger.debug(
            f"Failed to load {cls.__name__} from model storage. Resource "
            f"'{resource.name}' doesn't exist."
        )
        return cls(config, model_storage, resource, execution_context)
@classmethod
def _load(
    cls,
    model_path: Path,
    config: Dict[Text, Any],
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
) -> "DIETClassifier":
    """Loads the trained model from the provided directory."""
    (
        index_label_id_mapping,
        entity_tag_specs,
        label_data,
        data_example,
        sparse_feature_sizes,
    ) = cls._load_from_files(model_path)
    # Normalize legacy confidence/similarity settings before building.
    config = train_utils.update_confidence_type(config)
    config = train_utils.update_similarity_type(config)
    model = cls._load_model(
        entity_tag_specs,
        label_data,
        config,
        data_example,
        model_path,
        finetune_mode=execution_context.is_finetuning,
    )
    return cls(
        config=config,
        model_storage=model_storage,
        resource=resource,
        execution_context=execution_context,
        index_label_id_mapping=index_label_id_mapping,
        entity_tag_specs=entity_tag_specs,
        model=model,
        sparse_feature_sizes=sparse_feature_sizes,
    )
@classmethod
def _load_from_files(
    cls, model_path: Path
) -> Tuple[
    Dict[int, Text],
    List[EntityTagSpec],
    RasaModelData,
    Dict[Text, Dict[Text, List[FeatureArray]]],
    Dict[Text, Dict[Text, List[int]]],
]:
    """Read back all non-TF artifacts written by `persist`."""
    prefix = cls.__name__
    data_example = io_utils.pickle_load(model_path / f"{prefix}.data_example.pkl")
    label_data = RasaModelData(
        data=io_utils.pickle_load(model_path / f"{prefix}.label_data.pkl")
    )
    sparse_feature_sizes = io_utils.pickle_load(
        model_path / f"{prefix}.sparse_feature_sizes.pkl"
    )
    index_label_id_mapping = io_utils.json_unpickle(
        model_path / f"{prefix}.index_label_id_mapping.json"
    )
    raw_tag_specs = rasa.shared.utils.io.read_json_file(
        model_path / f"{prefix}.entity_tag_specs.json"
    )
    # JSON keys/values are strings; restore the integer ids.
    entity_tag_specs = [
        EntityTagSpec(
            tag_name=spec["tag_name"],
            ids_to_tags={int(k): v for k, v in spec["ids_to_tags"].items()},
            tags_to_ids={k: int(v) for k, v in spec["tags_to_ids"].items()},
            num_tags=spec["num_tags"],
        )
        for spec in raw_tag_specs
    ]
    # jsonpickle converts dictionary keys to strings
    index_label_id_mapping = {
        int(k): v for k, v in index_label_id_mapping.items()
    }
    return (
        index_label_id_mapping,
        entity_tag_specs,
        label_data,
        data_example,
        sparse_feature_sizes,
    )
@classmethod
def _load_model(
    cls,
    entity_tag_specs: List[EntityTagSpec],
    label_data: RasaModelData,
    config: Dict[Text, Any],
    data_example: Dict[Text, Dict[Text, List[FeatureArray]]],
    model_path: Path,
    finetune_mode: bool = False,
) -> "RasaModel":
    """Reconstruct the TF model from disk using the persisted data example."""
    tf_model_file = model_path / f"{cls.__name__}.tf_model"
    # The label keys only apply when intent classification is enabled.
    if config[INTENT_CLASSIFICATION]:
        label_key, label_sub_key = LABEL_KEY, LABEL_SUB_KEY
    else:
        label_key, label_sub_key = None, None
    model_data_example = RasaModelData(
        label_key=label_key, label_sub_key=label_sub_key, data=data_example
    )
    return cls._load_model_class(
        tf_model_file,
        model_data_example,
        label_data,
        entity_tag_specs,
        config,
        finetune_mode=finetune_mode,
    )
@classmethod
def _load_model_class(
    cls,
    tf_model_file: Text,
    model_data_example: RasaModelData,
    label_data: RasaModelData,
    entity_tag_specs: List[EntityTagSpec],
    config: Dict[Text, Any],
    finetune_mode: bool,
) -> "RasaModel":
    """Instantiate the model class and load its persisted weights."""
    # At prediction time only TEXT features are fed to the model.
    text_only_data = {
        key: value
        for key, value in model_data_example.items()
        if TEXT in key
    }
    predict_data_example = RasaModelData(
        label_key=model_data_example.label_key, data=text_only_data
    )
    return cls.model_class().load(
        tf_model_file,
        model_data_example,
        predict_data_example,
        data_signature=model_data_example.get_signature(),
        label_data=label_data,
        entity_tag_specs=entity_tag_specs,
        config=copy.deepcopy(config),
        finetune_mode=finetune_mode,
    )
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
    """Create a fresh, untrained instance of the configured model class."""
    model_cls = self.model_class()
    return model_cls(
        data_signature=model_data.get_signature(),
        label_data=self._label_data,
        entity_tag_specs=self._entity_tag_specs,
        config=self.component_config,
    )
class DIET(TransformerRasaModel):
def __init__(
    self,
    data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
    label_data: RasaModelData,
    entity_tag_specs: Optional[List[EntityTagSpec]],
    config: Dict[Text, Any],
) -> None:
    """Build the DIET graph for the given data signature and config."""
    # create entity tag spec before calling super otherwise building the model
    # will fail
    super().__init__("DIET", config, data_signature, label_data)
    self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)
    # Prediction only consumes TEXT features.
    self.predict_data_signature = {
        name: features for name, features in data_signature.items() if TEXT in name
    }
    # tf training
    self._create_metrics()
    self._update_metrics_to_log()
    # needed for efficient prediction
    self.all_labels_embed: Optional[tf.Tensor] = None
    self._prepare_layers()
@staticmethod
def _ordered_tag_specs(
    entity_tag_specs: Optional[List[EntityTagSpec]],
) -> List[EntityTagSpec]:
    """Ensure that order of entity tag specs matches CRF layer order."""
    if entity_tag_specs is None:
        return []
    crf_order = (
        ENTITY_ATTRIBUTE_TYPE,
        ENTITY_ATTRIBUTE_ROLE,
        ENTITY_ATTRIBUTE_GROUP,
    )
    # Stable re-ordering: for each CRF slot pick the matching spec(s).
    return [
        tag_spec
        for tag_name in crf_order
        for tag_spec in entity_tag_specs
        if tag_spec.tag_name == tag_name
    ]
def _check_data(self) -> None:
    """Validate that the data signature supports the configured tasks."""
    if TEXT not in self.data_signature:
        raise InvalidConfigException(
            f"No text features specified. "
            f"Cannot train '{self.__class__.__name__}' model."
        )
    if self.config[INTENT_CLASSIFICATION]:
        if LABEL not in self.data_signature:
            raise InvalidConfigException(
                f"No label features specified. "
                f"Cannot train '{self.__class__.__name__}' model."
            )
        if self.config[SHARE_HIDDEN_LAYERS]:
            # Shared layers require text and label features to look alike.
            text_sig = self.data_signature[TEXT]
            label_sig = self.data_signature[LABEL]
            signatures_differ = any(
                granularity in text_sig
                and granularity in label_sig
                and text_sig[granularity] != label_sig[granularity]
                for granularity in (SENTENCE, SEQUENCE)
            )
            if signatures_differ:
                raise ValueError(
                    "If hidden layer weights are shared, data signatures "
                    "for text_features and label_features must coincide."
                )
    if self.config[ENTITY_RECOGNITION] and (
        ENTITIES not in self.data_signature
        or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]
    ):
        logger.debug(
            f"You specified '{self.__class__.__name__}' to train entities, but "
            f"no entities are present in the training data. Skipping training of "
            f"entities."
        )
        # Silently disable the entity head rather than failing training.
        self.config[ENTITY_RECOGNITION] = False
def _create_metrics(self) -> None:
    """Create Keras metrics; creation order determines reporting order."""
    # self.metrics will have the same order as they are created,
    # so create loss metrics first to output losses first.
    self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
    self.intent_loss = tf.keras.metrics.Mean(name="i_loss")
    self.entity_loss = tf.keras.metrics.Mean(name="e_loss")
    self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss")
    self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss")
    # Accuracy / F1 metrics second so they are reported after the losses.
    self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
    self.intent_acc = tf.keras.metrics.Mean(name="i_acc")
    self.entity_f1 = tf.keras.metrics.Mean(name="e_f1")
    self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1")
    self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1")
def _update_metrics_to_log(self) -> None:
    """Select which metric names get reported during training."""
    # Losses are only reported when the "rasa" logger is at DEBUG level.
    verbose = logging.getLogger("rasa").level == logging.DEBUG
    to_log = self.metrics_to_log
    if self.config[MASKED_LM]:
        to_log.append("m_acc")
        if verbose:
            to_log.append("m_loss")
    if self.config[INTENT_CLASSIFICATION]:
        to_log.append("i_acc")
        if verbose:
            to_log.append("i_loss")
    if self.config[ENTITY_RECOGNITION]:
        for tag_spec in self._entity_tag_specs:
            if tag_spec.num_tags != 0:
                # Metric prefixes are the first letter of the tag name.
                prefix = tag_spec.tag_name[0]
                to_log.append(f"{prefix}_f1")
                if verbose:
                    to_log.append(f"{prefix}_loss")
    self._log_metric_info()
def _log_metric_info(self) -> None:
    """Emit a human-readable legend for the abbreviated metric names."""
    metric_name = {
        "t": "total",
        "i": "intent",
        "e": "entity",
        "m": "mask",
        "r": "role",
        "g": "group",
    }
    logger.debug("Following metrics will be logged during training: ")
    for metric in self.metrics_to_log:
        parts = metric.split("_")
        name = f"{metric_name[parts[0]]} {parts[1]}"
        logger.debug(f"  {metric} ({name})")
def _prepare_layers(self) -> None:
    """Instantiate all TF sub-layers required by the configured tasks."""
    # For user text, prepare layers that combine different feature types,
    # embed everything using a transformer and optionally also do masked
    # language modeling.
    self.text_name = TEXT
    self._tf_layers[f"sequence_layer.{self.text_name}"] = (
        rasa_layers.RasaSequenceLayer(
            self.text_name, self.data_signature[self.text_name], self.config
        )
    )
    if self.config[MASKED_LM]:
        self._prepare_mask_lm_loss(self.text_name)
    # Intent labels are treated similarly to user text but without the
    # transformer, without masked language modelling, and with no dropout
    # applied to the individual features, only to the overall label embedding
    # after all label features have been combined.
    if self.config[INTENT_CLASSIFICATION]:
        self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL
        # disable input dropout applied to sparse and dense label features
        label_config = self.config.copy()
        label_config.update(
            {SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}
        )
        self._tf_layers[f"feature_combining_layer.{self.label_name}"] = (
            rasa_layers.RasaFeatureCombiningLayer(
                self.label_name, self.label_signature[self.label_name], label_config
            )
        )
        self._prepare_ffnn_layer(
            self.label_name,
            self.config[HIDDEN_LAYERS_SIZES][self.label_name],
            self.config[DROP_RATE],
        )
        self._prepare_label_classification_layers(predictor_attribute=TEXT)
    if self.config[ENTITY_RECOGNITION]:
        self._prepare_entity_recognition_layers()
def _prepare_mask_lm_loss(self, name: Text) -> None:
    """Prepare the embedding + loss layers for masked language modelling."""
    # for embedding predicted tokens at masked positions
    self._prepare_embed_layers(f"{name}_lm_mask")
    # for embedding the true tokens that got masked
    self._prepare_embed_layers(f"{name}_golden_token")
    # The MLM loss is an auxiliary loss; scaling is disabled so that it
    # doesn't overpower the primary losses.
    self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False)
def _create_bow(
    self,
    sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
    sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
    sequence_feature_lengths: tf.Tensor,
    name: Text,
) -> tf.Tensor:
    """Combine features into a bag-of-words vector and pass it through a FFNN."""
    combined, _ = self._tf_layers[f"feature_combining_layer.{name}"](
        (sequence_features, sentence_features, sequence_feature_lengths),
        training=self._training,
    )
    # Sum over the sequence dimension -> bag-of-words representation.
    bow = tf.reduce_sum(combined, axis=1)
    return self._tf_layers[f"ffnn.{name}"](bow, self._training)
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
    """Embed every known label once; returns (label ids, label embeddings)."""
    all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]
    label_lengths = self._get_sequence_feature_lengths(self.tf_label_data, LABEL)
    bow = self._create_bow(
        self.tf_label_data[LABEL][SEQUENCE],
        self.tf_label_data[LABEL][SENTENCE],
        label_lengths,
        self.label_name,
    )
    all_labels_embed = self._tf_layers[f"embed.{LABEL}"](bow)
    return all_label_ids, all_labels_embed
def _mask_loss(
    self,
    outputs: tf.Tensor,
    inputs: tf.Tensor,
    seq_ids: tf.Tensor,
    mlm_mask_boolean: tf.Tensor,
    name: Text,
) -> tf.Tensor:
    """Compute the masked-language-model loss over the masked positions."""
    # Guarantee at least one element in the mask so the boolean_mask
    # selections below are never empty.
    mlm_mask_boolean = tf.cond(
        tf.reduce_any(mlm_mask_boolean),
        lambda: mlm_mask_boolean,
        lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),
    )
    mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)
    # Pick elements that were masked, throwing away the batch & sequence
    # dimensions: (batch_size, sequence_length, units) becomes
    # (num_masked_elements, units).
    masked_outputs = tf.boolean_mask(outputs, mlm_mask_boolean)
    masked_inputs = tf.boolean_mask(inputs, mlm_mask_boolean)
    masked_ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)
    predicted_embed = self._tf_layers[f"embed.{name}_lm_mask"](masked_outputs)
    true_embed = self._tf_layers[f"embed.{name}_golden_token"](masked_inputs)
    # To limit the otherwise computationally expensive loss calculation, we
    # constrain the label space in MLM (i.e. token space) to only those tokens
    # that were masked in this batch. Hence the reduced list of token
    # embeddings (true_embed) and the reduced list of labels (masked_ids) are
    # passed as all_labels_embed and all_labels, respectively. In the future,
    # we could be less restrictive and construct a slightly bigger label space
    # which could include tokens not masked in the current batch too.
    return self._tf_layers[f"loss.{name}_mask"](
        inputs_embed=predicted_embed,
        labels_embed=true_embed,
        labels=masked_ids,
        all_labels_embed=true_embed,
        all_labels=masked_ids,
    )
def _calculate_label_loss(
    self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor
) -> tf.Tensor:
    """Compute the similarity loss between text and label embeddings."""
    all_label_ids, all_labels_embed = self._create_all_labels()
    text_embed = self._tf_layers[f"embed.{TEXT}"](text_features)
    label_embed = self._tf_layers[f"embed.{LABEL}"](label_features)
    return self._tf_layers[f"loss.{LABEL}"](
        text_embed, label_embed, label_ids, all_labels_embed, all_label_ids
    )
def batch_loss(
    self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> tf.Tensor:
    """Calculates the loss for the given batch.

    Args:
        batch_in: The batch.

    Returns:
        The loss of the given batch.
    """
    tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
    sequence_lengths = self._get_sequence_feature_lengths(tf_batch_data, TEXT)
    (
        text_transformed,
        text_in,
        mask_combined_sequence_sentence,
        text_seq_ids,
        mlm_mask_boolean_text,
        _,
    ) = self._tf_layers[f"sequence_layer.{self.text_name}"](
        (
            tf_batch_data[TEXT][SEQUENCE],
            tf_batch_data[TEXT][SENTENCE],
            sequence_lengths,
        ),
        training=self._training,
    )
    # Lengths of sequences in case of sentence-level features are always 1,
    # but they can effectively be 0 if sentence-level features aren't present.
    sentence_lengths = self._get_sentence_feature_lengths(tf_batch_data, TEXT)
    combined_lengths = sequence_lengths + sentence_lengths
    losses = []
    if self.config[MASKED_LM]:
        mlm_loss, mlm_acc = self._mask_loss(
            text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT
        )
        self.mask_loss.update_state(mlm_loss)
        self.mask_acc.update_state(mlm_acc)
        losses.append(mlm_loss)
    if self.config[INTENT_CLASSIFICATION]:
        losses.append(
            self._batch_loss_intent(
                combined_lengths, text_transformed, tf_batch_data
            )
        )
    if self.config[ENTITY_RECOGNITION]:
        losses.extend(
            self._batch_loss_entities(
                mask_combined_sequence_sentence,
                sequence_lengths,
                text_transformed,
                tf_batch_data,
            )
        )
    return tf.math.add_n(losses)
def _batch_loss_intent(
    self,
    combined_sequence_sentence_feature_lengths_text: tf.Tensor,
    text_transformed: tf.Tensor,
    tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> tf.Tensor:
    """Compute the intent-classification loss for one batch."""
    # The last token of the transformer output summarizes the sentence.
    sentence_vector = self._last_token(
        text_transformed, combined_sequence_sentence_feature_lengths_text
    )
    label_lengths = self._get_sequence_feature_lengths(tf_batch_data, LABEL)
    label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
    label_vector = self._create_bow(
        tf_batch_data[LABEL][SEQUENCE],
        tf_batch_data[LABEL][SENTENCE],
        label_lengths,
        self.label_name,
    )
    loss, acc = self._calculate_label_loss(sentence_vector, label_vector, label_ids)
    self._update_label_metrics(loss, acc)
    return loss
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
    """Accumulate intent loss/accuracy into the running metrics."""
    self.intent_loss.update_state(loss)
    self.intent_acc.update_state(acc)
def _batch_loss_entities(
    self,
    mask_combined_sequence_sentence: tf.Tensor,
    sequence_feature_lengths: tf.Tensor,
    text_transformed: tf.Tensor,
    tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> List[tf.Tensor]:
    """Compute one CRF loss per trained entity tag set."""
    losses = []
    entity_tags = None
    for tag_spec in self._entity_tag_specs:
        # A tag set with no tags was not trained -> nothing to score.
        if tag_spec.num_tags == 0:
            continue
        tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]
        # Pad a zero (no entity) for the sentence features so the tag ids
        # match the shape of the model inputs.
        tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])
        loss, f1, _logits = self._calculate_entity_loss(
            text_transformed,
            tag_ids,
            mask_combined_sequence_sentence,
            sequence_feature_lengths,
            tag_spec.tag_name,
            entity_tags,
        )
        if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:
            # Feed the gold entity types as additional input to the role
            # and group CRFs.
            entity_tags = tf.one_hot(
                tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags
            )
        self._update_entity_metrics(loss, f1, tag_spec.tag_name)
        losses.append(loss)
    return losses
def _update_entity_metrics(
    self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text
) -> None:
    """Route entity CRF loss/F1 into the metric pair matching the tag set."""
    metric_pairs = {
        ENTITY_ATTRIBUTE_TYPE: (self.entity_loss, self.entity_f1),
        ENTITY_ATTRIBUTE_GROUP: (self.entity_group_loss, self.entity_group_f1),
        ENTITY_ATTRIBUTE_ROLE: (self.entity_role_loss, self.entity_role_f1),
    }
    pair = metric_pairs.get(tag_name)
    if pair is not None:
        loss_metric, f1_metric = pair
        loss_metric.update_state(loss)
        f1_metric.update_state(f1)
def prepare_for_predict(self) -> None:
    """Prepares the model for prediction."""
    if self.config[INTENT_CLASSIFICATION]:
        # Cache label embeddings once so inference doesn't recompute them.
        _, self.all_labels_embed = self._create_all_labels()
def batch_predict(
    self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> Dict[Text, tf.Tensor]:
    """Predicts the output of the given batch.

    Args:
        batch_in: The batch.

    Returns:
        The output to predict.
    """
    tf_batch_data = self.batch_to_model_data_format(
        batch_in, self.predict_data_signature
    )
    sequence_lengths = self._get_sequence_feature_lengths(tf_batch_data, TEXT)
    sentence_lengths = self._get_sentence_feature_lengths(tf_batch_data, TEXT)
    text_transformed, _, _, _, _, attention_weights = self._tf_layers[
        f"sequence_layer.{self.text_name}"
    ](
        (
            tf_batch_data[TEXT][SEQUENCE],
            tf_batch_data[TEXT][SENTENCE],
            sequence_lengths,
        ),
        training=self._training,
    )
    predictions = {
        DIAGNOSTIC_DATA: {
            "attention_weights": attention_weights,
            "text_transformed": text_transformed,
        }
    }
    if self.config[INTENT_CLASSIFICATION]:
        predictions.update(
            self._batch_predict_intents(
                sequence_lengths + sentence_lengths, text_transformed
            )
        )
    if self.config[ENTITY_RECOGNITION]:
        predictions.update(
            self._batch_predict_entities(sequence_lengths, text_transformed)
        )
    return predictions
def _batch_predict_entities(
    self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor
) -> Dict[Text, tf.Tensor]:
    """Run every trained entity CRF and collect ids + confidences."""
    predictions: Dict[Text, tf.Tensor] = {}
    entity_tags = None
    for tag_spec in self._entity_tag_specs:
        # skip crf layer if it was not trained
        if tag_spec.num_tags == 0:
            continue
        name = tag_spec.tag_name
        crf_input = text_transformed
        if entity_tags is not None:
            tag_embeddings = self._tf_layers[f"embed.{name}.tags"](entity_tags)
            crf_input = tf.concat([crf_input, tag_embeddings], axis=-1)
        logits = self._tf_layers[f"embed.{name}.logits"](crf_input)
        pred_ids, confidences = self._tf_layers[f"crf.{name}"](
            logits, sequence_feature_lengths
        )
        predictions[f"e_{name}_ids"] = pred_ids
        predictions[f"e_{name}_scores"] = confidences
        if name == ENTITY_ATTRIBUTE_TYPE:
            # Feed the predicted entity types as additional input to the
            # role and group CRFs.
            entity_tags = tf.one_hot(
                tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags
            )
    return predictions
def _batch_predict_intents(
    self,
    combined_sequence_sentence_feature_lengths: tf.Tensor,
    text_transformed: tf.Tensor,
) -> Dict[Text, tf.Tensor]:
    """Score every known label against the sentence representation."""
    if self.all_labels_embed is None:
        raise ValueError(
            "The model was not prepared for prediction. "
            "Call `prepare_for_predict` first."
        )
    # The last token of the transformer output summarizes the sentence.
    sentence_vector = self._last_token(
        text_transformed, combined_sequence_sentence_feature_lengths
    )
    sentence_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
    _, scores = self._tf_layers[
        f"loss.{LABEL}"
    ].get_similarities_and_confidences_from_embeddings(
        sentence_embed[:, tf.newaxis, :],
        self.all_labels_embed[tf.newaxis, :, :],
    )
    return {"i_scores": scores}
|
_compute_term_frequency
|
Computes TF metrics for each sentence (column) in the given matrix.
You can read more about smoothing parameter at URL below:
http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
from warnings import warn
try:
import numpy
except ImportError:
numpy = None
try:
from numpy.linalg import svd as singular_value_decomposition
except ImportError:
singular_value_decomposition = None
from ._summarizer import AbstractSummarizer
class LsaSummarizer(AbstractSummarizer):
    """Latent Semantic Analysis based sentence extractor."""

    MIN_DIMENSIONS = 3
    REDUCTION_RATIO = 1 / 1
    _stop_words = frozenset()

    @property
    def stop_words(self):
        # Words excluded from the term dictionary.
        return self._stop_words

    @stop_words.setter
    def stop_words(self, words):
        self._stop_words = frozenset(map(self.normalize_word, words))

    def __call__(self, document, sentences_count):
        """Return the `sentences_count` best sentences of `document`."""
        self._ensure_dependecies_installed()
        dictionary = self._create_dictionary(document)
        if not dictionary:
            # empty document
            return ()
        matrix = self._compute_term_frequency(
            self._create_matrix(document, dictionary)
        )
        _, sigma, v = singular_value_decomposition(matrix, full_matrices=False)
        ranks = iter(self._compute_ranks(sigma, v))
        return self._get_best_sentences(
            document.sentences, sentences_count, lambda s: next(ranks)
        )

    def _ensure_dependecies_installed(self):
        if numpy is None:
            raise ValueError("LSA summarizer requires NumPy. Please, install it by command 'pip install numpy'.")

    def _create_dictionary(self, document):
        """Creates mapping key = word, value = row index"""
        normalized = map(self.normalize_word, document.words)
        stems = frozenset(
            self.stem_word(word)
            for word in normalized
            if word not in self._stop_words
        )
        return {word: index for index, word in enumerate(stems)}

    def _create_matrix(self, document, dictionary):
        """Build a |unique words| x |sentences| matrix of word occurrence
        counts: rows are words, columns are sentences.
        """
        sentences = document.sentences
        words_count = len(dictionary)
        sentences_count = len(sentences)
        if words_count < sentences_count:
            message = (
                "Number of words (%d) is lower than number of sentences (%d). "
                "LSA algorithm may not work properly."
            )
            warn(message % (words_count, sentences_count))
        matrix = numpy.zeros((words_count, sentences_count))
        for col, sentence in enumerate(sentences):
            for stem in map(self.stem_word, sentence.words):
                # Stop-words and other filtered words are absent from the dict.
                row = dictionary.get(stem)
                if row is not None:
                    matrix[row, col] += 1
        return matrix
# MASKED: _compute_term_frequency function (lines 90-107)
def _compute_ranks(self, sigma, v_matrix):
    """Rank each sentence by its weighted contribution across LSA topics."""
    assert len(sigma) == v_matrix.shape[0], "Matrices should be multiplicable"
    dimensions = max(
        LsaSummarizer.MIN_DIMENSIONS,
        int(len(sigma) * LsaSummarizer.REDUCTION_RATIO),
    )
    # Square the significant singular values; zero-out the rest.
    powered_sigma = tuple(
        s ** 2 if i < dimensions else 0.0 for i, s in enumerate(sigma)
    )
    # One rank per column of V (i.e. per sentence).
    return [
        math.sqrt(sum(s * v ** 2 for s, v in zip(powered_sigma, column_vector)))
        for column_vector in v_matrix.T
    ]
|
def _compute_term_frequency(self, matrix, smooth=0.4):
"""
Computes TF metrics for each sentence (column) in the given matrix.
You can read more about smoothing parameter at URL below:
http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html
"""
assert 0.0 <= smooth < 1.0
max_word_frequencies = numpy.max(matrix, axis=0)
rows, cols = matrix.shape
for row in range(rows):
for col in range(cols):
max_word_frequency = max_word_frequencies[col]
if max_word_frequency != 0:
frequency = matrix[row, col]/max_word_frequency
matrix[row, col] = smooth + (1.0 - smooth)*frequency
return matrix
| 90
| 107
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
from warnings import warn
try:
import numpy
except ImportError:
numpy = None
try:
from numpy.linalg import svd as singular_value_decomposition
except ImportError:
singular_value_decomposition = None
from ._summarizer import AbstractSummarizer
class LsaSummarizer(AbstractSummarizer):
MIN_DIMENSIONS = 3
REDUCTION_RATIO = 1/1
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
self._ensure_dependecies_installed()
dictionary = self._create_dictionary(document)
# empty document
if not dictionary:
return ()
matrix = self._create_matrix(document, dictionary)
matrix = self._compute_term_frequency(matrix)
u, sigma, v = singular_value_decomposition(matrix, full_matrices=False)
ranks = iter(self._compute_ranks(sigma, v))
return self._get_best_sentences(document.sentences, sentences_count,
lambda s: next(ranks))
def _ensure_dependecies_installed(self):
if numpy is None:
raise ValueError("LSA summarizer requires NumPy. Please, install it by command 'pip install numpy'.")
def _create_dictionary(self, document):
"""Creates mapping key = word, value = row index"""
# print(document.words)
words = map(self.normalize_word, document.words)
unique_words = frozenset(self.stem_word(w) for w in words if w not in self._stop_words)
return dict((w, i) for i, w in enumerate(unique_words))
def _create_matrix(self, document, dictionary):
"""
Creates matrix of shape |unique words|×|sentences| where cells
contains number of occurences of words (rows) in senteces (cols).
"""
sentences = document.sentences
words_count = len(dictionary)
sentences_count = len(sentences)
if words_count < sentences_count:
message = (
"Number of words (%d) is lower than number of sentences (%d). "
"LSA algorithm may not work properly."
)
warn(message % (words_count, sentences_count))
# create matrix |unique words|×|sentences| filled with zeroes
matrix = numpy.zeros((words_count, sentences_count))
for col, sentence in enumerate(sentences):
for word in map(self.stem_word, sentence.words):
# only valid words is counted (not stop-words, ...)
if word in dictionary:
row = dictionary[word]
matrix[row, col] += 1
return matrix
def _compute_term_frequency(self, matrix, smooth=0.4):
"""
Computes TF metrics for each sentence (column) in the given matrix.
You can read more about smoothing parameter at URL below:
http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html
"""
assert 0.0 <= smooth < 1.0
max_word_frequencies = numpy.max(matrix, axis=0)
rows, cols = matrix.shape
for row in range(rows):
for col in range(cols):
max_word_frequency = max_word_frequencies[col]
if max_word_frequency != 0:
frequency = matrix[row, col]/max_word_frequency
matrix[row, col] = smooth + (1.0 - smooth)*frequency
return matrix
def _compute_ranks(self, sigma, v_matrix):
    """Score each sentence from the SVD factors ``sigma`` and ``v_matrix``."""
    assert len(sigma) == v_matrix.shape[0], "Matrices should be multiplicable"

    # Keep only the strongest latent topics; the remainder get zero weight.
    dimensions = max(
        LsaSummarizer.MIN_DIMENSIONS,
        int(len(sigma) * LsaSummarizer.REDUCTION_RATIO),
    )
    powered_sigma = tuple(
        value ** 2 if index < dimensions else 0.0
        for index, value in enumerate(sigma)
    )

    # One rank per column of V, i.e. per sentence.
    return [
        math.sqrt(
            sum(weight * component ** 2
                for weight, component in zip(powered_sigma, column))
        )
        for column in v_matrix.T
    ]
|
run
|
Invoca o utilitário `isis2json` com os parâmetros adaptados para a
leitura de arquivos MST de acordo com as definições padrões utilizadas
pelo __main__ da ferramenta `isis2json`.
O resultado de saída pode ser escrito diretamente para um arquivo em disco
ou retornará uma lista contendo as linhas passíveis de conversão para
JSON.
Exemplo:
>>> run("file.mst")
>>> [{"mfn": 1}, {"mfn": 2}]
>>> run("file.mst", output_file="/tmp/output.json")
>>> None
|
import os
import logging
import json
from typing import Union, Dict, List
from documentstore_migracao.utils.isis2json import isis2json
logger = logging.getLogger(__name__)
class OutputContainer:
    """File-like sink that collects written JSON lines into a list.

    Mimics the ``write``/``close`` interface of a file object: every
    ``write`` call whose payload parses as JSON becomes one entry of
    :attr:`lines`; anything else is silently dropped.
    """

    def __init__(self):
        # Parsed JSON documents, in the order they were written.
        self._lines = []

    def write(self, string: str) -> None:
        """Parse ``string`` as JSON and store it; ignore invalid input."""
        try:
            parsed = json.loads(string)
        except Exception:
            # Best-effort: structural fragments (e.g. "[", ",") are skipped.
            return
        self._lines.append(parsed)

    def close(self):
        """No-op, present only for file-object compatibility."""

    @property
    def lines(self):
        """List of successfully parsed JSON documents."""
        return self._lines
def create_output_dir(path):
    """Ensure that the parent directory of ``path`` exists.

    :param path: path of the file that is about to be written.

    Fixes over the previous version: splitting on "/" broke on bare file
    names (``os.makedirs("")`` raised) and on Windows separators, and the
    exists-then-create sequence was racy. ``os.path.dirname`` plus
    ``os.makedirs(..., exist_ok=True)`` handles all of these.
    """
    output_dir = os.path.dirname(path)
    if not output_dir:
        # Bare file name: nothing to create.
        return
    if not os.path.exists(output_dir):
        # Logged via the module logger name to keep messages grouped.
        logging.getLogger(__name__).debug("Creating folder: %s", output_dir)
    os.makedirs(output_dir, exist_ok=True)
# MASKED: run function (lines 42-85)
|
def run(path: str, output_file: str = "", mongo=False) -> Union[None, List[dict]]:
    """Convert an ISIS MST database to JSON using the ``isis2json`` utility.

    Invokes ``isis2json`` with the same default parameters used by the
    tool's ``__main__``. The result is either written directly to
    ``output_file`` on disk or, when no output file is given, returned as a
    list of the lines that could be parsed as JSON.

    Example:
        >>> run("file.mst")
        [{"mfn": 1}, {"mfn": 2}]
        >>> run("file.mst", output_file="/tmp/output.json")

    :param path: path of the ``.mst`` file to read.
    :param output_file: optional destination file; when empty the parsed
        records are returned instead.
    :param mongo: forwarded to ``isis2json.writeJsonArray``.
    :return: ``None`` when writing to ``output_file``, otherwise the list
        of parsed JSON records.
    :raises FileNotFoundError: when ``path`` does not exist.
    """
    if not os.path.exists(path):
        # BUG FIX: the message previously contained a literal, unfilled "%s".
        raise FileNotFoundError("File '%s' does not exist." % path)

    if output_file:
        output = open(output_file, "wb")
    else:
        output = OutputContainer()

    try:
        isis2json.writeJsonArray(
            iterRecords=isis2json.iterMstRecords,
            file_name=path,
            output=output,
            qty=isis2json.DEFAULT_QTY,
            skip=0,
            id_tag=0,
            gen_uuid=False,
            mongo=mongo,
            mfn=True,
            isis_json_type=3,
            prefix="v",
            constant="",
        )
    finally:
        # Release the file handle even if the conversion fails.
        output.close()

    if isinstance(output, OutputContainer):
        return output.lines
| 42
| 85
|
import os
import logging
import json
from typing import Union, Dict, List
from documentstore_migracao.utils.isis2json import isis2json
logger = logging.getLogger(__name__)
class OutputContainer:
    """File-like object that collects written JSON lines into a list.

    Mimics file writing: each ``write`` call whose payload parses as JSON
    becomes one entry of the internal list exposed via :attr:`lines`.
    """
    def __init__(self):
        # Parsed JSON documents, in write order.
        self._lines = []
    def write(self, string: str) -> None:
        # Best-effort parse: non-JSON fragments (e.g. "[", ",") emitted by
        # isis2json around records are silently ignored.
        try:
            _string = json.loads(string)
        except Exception:
            pass
        else:
            self._lines.append(_string)
    def close(self):
        # No-op, present only for file-object interface compatibility.
        pass
    @property
    def lines(self):
        # Successfully parsed JSON documents.
        return self._lines
def create_output_dir(path):
    """Ensure that the parent directory of ``path`` exists.

    :param path: path of the file that is about to be written.

    Fixes over the previous version: splitting on "/" broke on bare file
    names (``os.makedirs("")`` raised) and on Windows separators, and the
    exists-then-create sequence was racy. ``os.path.dirname`` plus
    ``os.makedirs(..., exist_ok=True)`` handles all of these.
    """
    output_dir = os.path.dirname(path)
    if not output_dir:
        # Bare file name: nothing to create.
        return
    if not os.path.exists(output_dir):
        # Logged via the module logger name to keep messages grouped.
        logging.getLogger(__name__).debug("Creating folder: %s", output_dir)
    os.makedirs(output_dir, exist_ok=True)
def run(path: str, output_file: str = "", mongo=False) -> Union[None, List[dict]]:
    """Convert an ISIS MST database to JSON using the ``isis2json`` utility.

    Invokes ``isis2json`` with the same default parameters used by the
    tool's ``__main__``. The result is either written directly to
    ``output_file`` on disk or, when no output file is given, returned as a
    list of the lines that could be parsed as JSON.

    Example:
        >>> run("file.mst")
        [{"mfn": 1}, {"mfn": 2}]
        >>> run("file.mst", output_file="/tmp/output.json")

    :param path: path of the ``.mst`` file to read.
    :param output_file: optional destination file; when empty the parsed
        records are returned instead.
    :param mongo: forwarded to ``isis2json.writeJsonArray``.
    :return: ``None`` when writing to ``output_file``, otherwise the list
        of parsed JSON records.
    :raises FileNotFoundError: when ``path`` does not exist.
    """
    if not os.path.exists(path):
        # BUG FIX: the message previously contained a literal, unfilled "%s".
        raise FileNotFoundError("File '%s' does not exist." % path)

    if output_file:
        output = open(output_file, "wb")
    else:
        output = OutputContainer()

    try:
        isis2json.writeJsonArray(
            iterRecords=isis2json.iterMstRecords,
            file_name=path,
            output=output,
            qty=isis2json.DEFAULT_QTY,
            skip=0,
            id_tag=0,
            gen_uuid=False,
            mongo=mongo,
            mfn=True,
            isis_json_type=3,
            prefix="v",
            constant="",
        )
    finally:
        # Release the file handle even if the conversion fails.
        output.close()

    if isinstance(output, OutputContainer):
        return output.lines
|
parse
|
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
# Public API of this module: the message base class plus one class per
# WAMP message type defined by the protocol.
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Precompiled WAMP URI validators. "strict" restricts components to
## lower-case alphanumerics/underscore of length >= 2; "loose" only forbids
## whitespace, "." and "#" inside a component.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """Return ``value`` if it is a loosely valid WAMP URI string.

    :raises ProtocolError: on a non-text value or one failing the loose
        URI pattern; ``message`` prefixes the error text.
    """
    # Exact type check (not isinstance) is deliberate protocol behaviour.
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value):
        return value
    raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
def check_or_raise_id(value, message):
    """Return ``value`` if it is a valid WAMP ID.

    :raises ProtocolError: on a non-integer value or one outside
        ``[0, 2**53]``; ``message`` prefixes the error text.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    # IDs must fit a IEEE double without precision loss: [0, 2**53].
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """Return ``value`` if it is a dict whose keys are all unicode strings.

    :raises ProtocolError: on a non-dict value or a non-text key;
        ``message`` prefixes the error text.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        # Per-serializer memo: maps ISerializer instance -> serialized bytes.
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        # Drop every cached serialization (e.g. after the message mutated).
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        # Serialize lazily and memoize per serializer instance.
        if serializer not in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]

IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()

            roles.append(role_features)

        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods

        authid = None
        if u'authid' in details:
            authid = details[u'authid']
            if type(authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(authid)))

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        # Optional authentication details; absent keys default to None.
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        # BUG FIX: this condition previously tested ``self.authrole``, so the
        # 'authmethod' detail was emitted (possibly as None) whenever an
        # authrole was set and dropped when only an authmethod was set.
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on wrong length, details or reason.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        # 'message' is the only detail this implementation extracts.
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
            message = details_message

        obj = Abort(reason, message)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        # Only a non-empty message is emitted on the wire.
        if self.message:
            details[u'message'] = self.message

        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        # Normalized: extra is always a dict on the instance, never None.
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on wrong length, method type or extra dict.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        obj = Challenge(method, extra)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        # Normalized: extra is always a dict on the instance, never None.
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on wrong length, signature type or extra dict.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        obj = Authenticate(signature, extra)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on wrong length, details or reason.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        # 'message' is the only detail this implementation extracts.
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
            message = details_message

        obj = Goodbye(reason, message)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        # Only a non-empty message is emitted on the wire.
        if self.message:
            details[u'message'] = self.message

        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on wrong length or invalid sequence numbers.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]

        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))

        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]

        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))

        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        # Optional 4th element carries padding data the peer discards.
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # Emit the short form unless discard padding is present.
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on invalid length, request type or payload types.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        # Only request/response message types may be the subject of an ERROR.
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        # Details dict is validated but (currently) not used further.
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # NOTE(review): details is always emitted empty here — looks like no
        # error details are supported by this implementation yet; confirm.
        details = {}

        # Positional args may only be present alone; kwargs imply args too.
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        # Options; ``None`` means "not set" and is omitted from the wire format.
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on invalid length, IDs, topic, payload or options.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        # Extract and type-check each supported option; unknown options
        # are ignored, absent options stay None.
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # Emit only options that were explicitly set (non-None).
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        # Trailing payload elements are only present when non-empty.
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        ## Both IDs are validated before the instance is constructed.
        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Published.MESSAGE_TYPE, self.request, self.publication]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## Topic matching methods a subscription may request.
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## An absent 'match' option defaults to exact matching.
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            option_match = options[u'match']
            if type(option_match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
            if option_match not in (Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD):
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
            match = option_match

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## The default (exact) match is implicit on the wire and not serialized.
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options = {u'match': self.match}
        else:
            options = {}
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Subscribed.MESSAGE_TYPE, self.request, self.subscription]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Unsubscribed.MESSAGE_TYPE, self.request]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for the application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for the application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        Raises :class:`ProtocolError` when the message length, the payload
        element types, or the 'publisher' detail are invalid.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        ## 4, 5 or 6 elements, matching the three wire formats documented on the class.
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        ## Optional positional payload (5th element).
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        ## Optional keyword payload (6th element).
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## Optional disclosure of the publisher's session ID via details.
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## Emit the shortest wire format that carries the payload present.
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        Raises :class:`ProtocolError` when the message length, the payload
        element types, or any of the 'timeout' / 'receive_progress' /
        'disclose_me' options are invalid.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        ## 4, 5 or 6 elements, matching the three wire formats documented on the class.
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        ## Optional positional call arguments (5th element).
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        ## Optional keyword call arguments (6th element).
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## 'timeout' option: must be a non-negative integer (milliseconds).
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout

        ## 'receive_progress' option: caller opts in to progressive results.
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress

        ## 'disclose_me' option: caller requests identity disclosure to the callee.
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## Only options the caller explicitly set are serialized.
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## Emit the shortest wire format that carries the arguments present.
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## Valid cancelation modes for the 'mode' option.
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        Raises :class:`ProtocolError` when the message length or the 'mode'
        option is invalid.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        ## 'mode' option: must be one of the three defined cancelation modes.
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: the original format string contained a stray trailing quote
        ## ("mode = '{1}'')") producing malformed output.
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        Raises :class:`ProtocolError` when the message length, the payload
        element types, or the 'progress' detail are invalid.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        ## 3, 4 or 5 elements, matching the three wire formats documented on the class.
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        ## Optional positional result payload (4th element).
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        ## Optional keyword result payload (5th element).
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## 'progress' detail: marks this as a progressive (non-final) result.
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.progress is not None:
            details[u'progress'] = self.progress

        ## Emit the shortest wire format that carries the payload present.
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        Raises :class:`ProtocolError` when the message length or the
        'pkeys' / 'disclose_caller' options are invalid.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## 'pkeys' option: must be a list of integers (application partition keys).
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys

        ## 'disclose_caller' option: callee requests caller identity disclosure.
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## Only options the caller explicitly set are serialized.
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys

        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Registered.MESSAGE_TYPE, self.request, self.registration]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Unregister.MESSAGE_TYPE, self.request, self.registration]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: message previously said "UNREGISTER", inconsistent with this
            ## message type (and with every sibling class's error text).
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        ## FIX: label previously said "UNREGISTER".
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: previously rendered as "WAMP UNREGISTER Message".
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, the callee should automatically cancel
           the invocation after this many milliseconds.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        ## cheap sanity checks on constructor input (stripped under python -O)
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        ## optional positional payload (5th element, if present)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        ## optional keyword payload (6th element, if present)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## each optional detail below defaults to None when absent and is
        ## type-checked (and for 'timeout', range-checked) when present
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout

        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress

        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller

        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`

        Only details that are not ``None`` are serialized; args/kwargs are
        appended only when non-empty (keeping the shortest valid wire format).
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.caller is not None:
            options[u'caller'] = self.caller

        if self.authid is not None:
            options[u'authid'] = self.authid

        if self.authrole is not None:
            options[u'authrole'] = self.authrole

        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two interruption modes allowed for the 'mode' option
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## the 'mode' option is optional, but when present must be one of the
        ## two known interruption modes
        mode = None
        if u'mode' in options:
            candidate_mode = options[u'mode']
            if type(candidate_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate_mode)))
            if candidate_mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate_mode))
            mode = candidate_mode

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional and keyword payloads
        args = None
        kwargs = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        progress = None
        if u'progress' in options:
            progress_flag = options[u'progress']
            if type(progress_flag) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress_flag)))
            progress = progress_flag

        return Yield(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## emit the shortest valid wire format: payloads only when non-empty
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.append(self.args)
            msg.append(self.kwargs)
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
## NOTE(review): this block is a detached duplicate of ``Hello.parse`` — it
## appears outside any visible class body and repeats the parser defined on
## the ``Hello`` class below. Confirm whether it is a stray paste that should
## be removed from the file.
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.

    :param wmsg: The unserialized raw message.
    :type wmsg: list
    :returns: An instance of this class.
    """
    ## this should already be verified by WampSerializer.unserialize
    ##
    assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

    if len(wmsg) != 3:
        raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

    realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
    details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

    ## 'roles' is mandatory and must contain at least one known role; each
    ## role may carry an optional 'features' dict used to construct the
    ## corresponding RoleFeatures instance
    roles = []

    if not u'roles' in details:
        raise ProtocolError("missing mandatory roles attribute in options in HELLO")

    details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

    if len(details_roles) == 0:
        raise ProtocolError("empty 'roles' in 'details' in HELLO")

    for role in details_roles:
        if role not in ROLE_NAME_TO_CLASS:
            raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

        role_cls = ROLE_NAME_TO_CLASS[role]

        details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

        if u'features' in details_role:
            check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

            ## FIXME: skip unknown attributes
            role_features = role_cls(**details_role[u'features'])

        else:
            role_features = role_cls()

        roles.append(role_features)

    ## optional list of announced authentication methods
    authmethods = None
    if u'authmethods' in details:
        details_authmethods = details[u'authmethods']
        if type(details_authmethods) != list:
            raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

        for auth_method in details_authmethods:
            if type(auth_method) != six.text_type:
                raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

        authmethods = details_authmethods

    ## optional announced authentication ID
    authid = None
    if u'authid' in details:
        details_authid = details[u'authid']
        if type(details_authid) != six.text_type:
            raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

        authid = details_authid

    obj = Hello(realm, roles, authmethods, authid)

    return obj
| 178
| 249
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## public API of this module: the message base class plus one class per
## WAMP message type
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')

import re

import six

import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS


## Four URI validation flavors (strict vs loose crossed with
## empty-components-allowed vs not). Only the *_LOOSE patterns are used by
## the checkers visible in this module.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate *value* as a WAMP URI (loose pattern, empty components allowed)
    and return it unchanged.

    :raises: :class:`autobahn.wamp.exception.ProtocolError` with *message*
       as context when the value is not a text string or does not match.
    """
    ## the type check must come first: matching a non-text value would
    ## raise TypeError instead of ProtocolError
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Validate *value* as a WAMP ID — an integer in ``[0, 2**53]`` — and
    return it unchanged.

    :raises: :class:`autobahn.wamp.exception.ProtocolError` with *message*
       as context when the value has the wrong type or is out of range.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## upper bound 9007199254740992 == 2**53 (largest exact integer in an
    ## IEEE-754 double, i.e. safe for JSON serializers)
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate *value* as a WAMP "extra" dictionary — a ``dict`` whose keys
    are all text strings — and return it unchanged.

    :raises: :class:`autobahn.wamp.exception.ProtocolError` with *message*
       as context when the value is not a dict or has a non-text key.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for k in value:
        if type(k) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-message serialization cache: maps an ISerializer instance to
        ## the bytes produced for this message by that serializer
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## EAFP: reuse the cached bytes when present, otherwise serialize
        ## once and remember the result
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        ## cheap sanity checks on constructor input (stripped under python -O)
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## 'roles' is mandatory and must name at least one known role; each
        ## role may carry an optional 'features' dict used to construct the
        ## corresponding RoleFeatures instance
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        ## optional list of announced authentication methods
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        ## optional announced authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only explicitly-set (non-None), public, non-ROLE
            ## attributes of each role object as features
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## optional authentication details (absent => None; note: values are
        ## not type-checked here, unlike in HELLO parsing)
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        ## 'roles' is mandatory and must name at least one known role; each
        ## role may carry an optional 'features' dict used to construct the
        ## corresponding RoleFeatures instance
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## consistency: use a text key like everywhere else in this module
        ## (was the bytes literal 'roles' — equivalent, but inconsistent)
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## fixed: this was guarded on self.authrole, which dropped the
        ## authmethod detail whenever authrole was unset — and emitted
        ## authmethod = None whenever authrole was set without authmethod
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only explicitly-set (non-None), public, non-ROLE
            ## attributes of each role object as features
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## the human-readable message is optional, but must be text when present
        message = None
        if u'message' in details:
            raw_message = details[u'message']
            if type(raw_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(raw_message)))
            message = raw_message

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## the human-readable message is optional, but must be text when present
        message = None
        if u'message' in details:
            raw_message = details[u'message']
            if type(raw_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(raw_message)))
            message = raw_message

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: If the raw message does not conform to the
            HEARTBEAT wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        # FIX: the format string previously read "incoming {0}" (missing " = "),
        # inconsistent with every other message __str__ in this module.
        return "WAMP HEARTBEAT Message (incoming = {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message, reporting failure of a previous request.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 8

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
            Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Error` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        # ERROR may only refer to request message types that expect a reply
        valid_request_types = (Subscribe.MESSAGE_TYPE,
                               Unsubscribe.MESSAGE_TYPE,
                               Publish.MESSAGE_TYPE,
                               Register.MESSAGE_TYPE,
                               Unregister.MESSAGE_TYPE,
                               Call.MESSAGE_TYPE,
                               Invocation.MESSAGE_TYPE)
        if request_type not in valid_request_types:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        # details are validated but currently unused
        check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        return Error(request_type, request, error, args = args, kwargs = kwargs)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        msg = [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message, requesting publication of an event to a topic.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 16

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
            be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
            error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event,
            even if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
            to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Publish` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## options
        ##
        acknowledge = None
        if u'acknowledge' in options:
            acknowledge = options[u'acknowledge']
            if type(acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(acknowledge)))

        excludeMe = None
        if u'exclude_me' in options:
            excludeMe = options[u'exclude_me']
            if type(excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(excludeMe)))

        exclude = None
        if u'exclude' in options:
            exclude = options[u'exclude']
            if type(exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(exclude)))
            for session_id in exclude:
                if type(session_id) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(session_id)))

        eligible = None
        if u'eligible' in options:
            eligible = options[u'eligible']
            if type(eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(eligible)))
            for session_id in eligible:
                if type(session_id) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(session_id)))

        discloseMe = None
        if u'disclose_me' in options:
            discloseMe = options[u'disclose_me']
            if type(discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(discloseMe)))

        return Publish(request,
                       topic,
                       args = args,
                       kwargs = kwargs,
                       acknowledge = acknowledge,
                       excludeMe = excludeMe,
                       exclude = exclude,
                       eligible = eligible,
                       discloseMe = discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options explicitly set are serialized
        options = {}
        for key, value in ((u'acknowledge', self.acknowledge),
                           (u'exclude_me', self.excludeMe),
                           (u'exclude', self.exclude),
                           (u'eligible', self.eligible),
                           (u'disclose_me', self.discloseMe)):
            if value is not None:
                options[key] = value

        msg = [Publish.MESSAGE_TYPE, self.request, options, self.topic]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a ``PUBLISH``.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 17

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Published` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message, requesting subscription to a topic.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 32

    # Topic matching methods.
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Subscribe` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## the 'match' option defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match = options[u'match']
            if type(match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match)))
            if match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match))

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## exact matching is the default and is therefore not serialized
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options = {u'match': self.match}
        else:
            options = {}
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE``.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 33

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Subscribed` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message, requesting removal of a subscription.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 34

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Unsubscribe` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 35

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Unsubscribed` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message, dispatching a published event to a subscriber.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 36

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Event` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        publisher = None
        if u'publisher' in details:
            publisher = details[u'publisher']
            if type(publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(publisher)))

        return Event(subscription,
                     publication,
                     args = args,
                     kwargs = kwargs,
                     publisher = publisher)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message, requesting invocation of a remote procedure.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    # The WAMP message code for this type of message.
    MESSAGE_TYPE = 48

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
            Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
            the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
            progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Call` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type itself is already verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## options
        ##
        timeout = None
        if u'timeout' in options:
            timeout = options[u'timeout']
            if type(timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(timeout)))
            if timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(timeout))

        receive_progress = None
        if u'receive_progress' in options:
            receive_progress = options[u'receive_progress']
            if type(receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(receive_progress)))

        discloseMe = None
        if u'disclose_me' in options:
            discloseMe = options[u'disclose_me']
            if type(discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(discloseMe)))

        return Call(request,
                    procedure,
                    args = args,
                    kwargs = kwargs,
                    timeout = timeout,
                    receive_progress = receive_progress,
                    discloseMe = discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options explicitly set are serialized
        options = {}
        for key, value in ((u'timeout', self.timeout),
                           (u'receive_progress', self.receive_progress),
                           (u'disclose_me', self.discloseMe)):
            if value is not None:
                options[key] = value

        msg = [Call.MESSAGE_TYPE, self.request, options, self.procedure]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message, requesting cancellation of a pending ``CALL``.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    # Cancellation modes.
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: If the raw message does not conform to the
            CANCEL wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        return Cancel(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        # FIX: the format string previously ended "mode = '{1}'')" — a stray
        # doubled quote in the output.
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
            results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        ## positions 3 (args) and 4 (kwargs) are optional payload elements
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        progress = None

        if u'progress' in details:

            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))

            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.progress is not None:
            details[u'progress'] = self.progress

        ## trailing payload elements are only emitted when non-empty;
        ## kwargs implies args must be present too (may be None then)
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
            the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        if u'pkeys' in options:

            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))

            ## every partition key must itself be an integer
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

            pkeys = option_pkeys

        if u'disclose_caller' in options:

            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))

            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, confirming a prior ``REGISTER``.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
        reg_id = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")

        return Registered(req_id, reg_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## FIX: diagnostic previously read "... for WAMP UNREGISTER" -- normalized
            ## to the "... for <MSGNAME>" wording used by every sibling message class.
            raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        return Unregister(request, registration)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message, confirming a prior ``UNREGISTER``.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: diagnostics previously said "UNREGISTER" -- this class parses
            ## the UNREGISTERED acknowledgement (message code 67), not UNREGISTER.
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        return Unregistered(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: label previously read "WAMP UNREGISTER Message".
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, the callee should automatically cancel
            the invocation after this many milliseconds.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        ## caller identity details (only present when the router discloses them)
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        ## positions 4 (args) and 5 (kwargs) are optional payload elements
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        timeout = None
        if u'timeout' in details:

            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))

            ## a negative timeout makes no sense on the wire
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))

            timeout = detail_timeout

        receive_progress = None
        if u'receive_progress' in details:

            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))

            receive_progress = detail_receive_progress

        caller = None
        if u'caller' in details:

            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))

            caller = detail_caller

        authid = None
        if u'authid' in details:

            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))

            authid = detail_authid

        authrole = None
        if u'authrole' in details:

            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))

            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:

            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))

            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## trailing payload elements are only emitted when non-empty
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## interruption modes; note that unlike CANCEL there is no 'skip' mode here
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## options
        ##
        mode = None

        if u'mode' in options:

            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))

            if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))

            mode = option_mode

        obj = Interrupt(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
            results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        ## positions 3 (args) and 4 (kwargs) are optional payload elements
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        progress = None

        if u'progress' in options:

            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))

            progress = option_progress

        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.progress is not None:
            options[u'progress'] = self.progress

        ## trailing payload elements are only emitted when non-empty
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
__init__
|
:param session: The WAMP session ID the other peer is assigned.
:type session: int
:param roles: The WAMP roles to announce.
:type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
:param authid: The authentication ID assigned.
:type authid: unicode or None
:param authrole: The authentication role assigned.
:type authrole: unicode or None
:param authmethod: The authentication method in use.
:type authmethod: unicode or None
:param authprovider: The authentication provider in use.
:type authprovider: unicode or None
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Pre-compiled URI validation patterns. A WAMP URI is a sequence of
## dot-separated components; "strict" restricts components to lowercase
## alphanumerics/underscore, while "loose" only excludes whitespace, '.' and '#'.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate a value as a WAMP URI and return it unchanged.

    :param value: The candidate URI (must be a unicode string matching the
        loose URI pattern).
    :param message: Context string prefixed to any error raised.

    :returns: The validated value.
    :raises ProtocolError: if the value is not a unicode string or does not
        match the loose URI pattern.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Validate a value as a WAMP ID and return it unchanged.

    :param value: The candidate ID (must be an integer in ``[0, 2**53]``,
        the exactly-representable range of IEEE-754 doubles used by JSON).
    :param message: Context string prefixed to any error raised.

    :returns: The validated value.
    :raises ProtocolError: if the value has the wrong type or is out of range.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    if not (0 <= value <= 9007199254740992):  # 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate a value as a WAMP "extra" dictionary (options/details) and
    return it unchanged.

    :param value: The candidate dict; all keys must be unicode strings.
    :param message: Context string prefixed to any error raised.

    :returns: The validated value.
    :raises ProtocolError: if the value is not a dict or has a non-unicode key.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-serializer cache: maps ISerializer instance -> serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`

        Drops all cached serializations of this message.
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`

        Returns the cached wire bytes for *serializer*, serializing (and
        caching) on first use.
        """
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data
## register the base class as a virtual implementation of the IMessage ABC
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        roles = []

        ## 'roles' is the only mandatory detail of HELLO
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        ## map announced role names to their RoleFeatures classes
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only role features that were explicitly set (non-None),
            ## skipping private attributes and the ROLE name itself
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
"""
A WAMP ``WELCOME`` message.
Format: ``[WELCOME, Session|id, Details|dict]``
"""
MESSAGE_TYPE = 2
"""
The WAMP message code for this type of message.
"""
# MASKED: __init__ function (lines 295-326)
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## authentication details are optional -- NOTE(review): these are not
        ## type-checked here, unlike the equivalent details in INVOCATION
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        roles = []

        ## 'roles' is the only mandatory detail of WELCOME
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj
def marshal(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`

    :returns: The serializable raw message ``[WELCOME, Session|id, Details|dict]``.
    """
    details = {
        u'roles': {}
    }
    ## only include optional authentication attributes that are actually set
    if self.authid:
        details[u'authid'] = self.authid
    if self.authrole:
        details[u'authrole'] = self.authrole
    if self.authmethod:
        ## FIXED: this previously tested `self.authrole` (copy-paste bug), so
        ## 'authmethod' was included/omitted based on the wrong attribute.
        details[u'authmethod'] = self.authmethod
    if self.authprovider:
        details[u'authprovider'] = self.authprovider

    ## serialize role features: only non-None, non-private attributes are announced
    for role in self.roles:
        details[u'roles'][role.ROLE] = {}
        for feature in role.__dict__:
            if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                if u'features' not in details[u'roles'][role.ROLE]:
                    details[u'roles'][role.ROLE] = {u'features': {}}
                details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

    return [Welcome.MESSAGE_TYPE, self.session, details]
def __str__(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
    """
    fmt = "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})"
    return fmt.format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert type(reason) == six.text_type
        assert message is None or type(message) == six.text_type

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## the closing message is optional, but if present must be a unicode string
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(message)))

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert type(method) == six.text_type
        assert extra is None or type(extra) == dict

        Message.__init__(self)
        self.method = method
        ## normalize a missing/empty extra dict to {}
        self.extra = extra if extra else {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert type(signature) == six.text_type
        assert extra is None or type(extra) == dict

        Message.__init__(self)
        self.signature = signature
        ## normalize a missing/empty extra dict to {}
        self.extra = extra if extra else {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert type(reason) == six.text_type
        assert message is None or type(message) == six.text_type

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## the closing message is optional, but if present must be a unicode string
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(message)))

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert type(incoming) in six.integer_types
        assert type(outgoing) in six.integer_types
        assert discard is None or type(discard) == six.text_type

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0:
            ## the last processed incoming heartbeat may be 0 (none yet), but never negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0:
            ## outgoing heartbeat sequence numbers start at 1, hence strictly positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        discard_len = len(self.discard) if self.discard else None
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, discard_len)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert type(request_type) in six.integer_types
        assert type(request) in six.integer_types
        assert type(error) == six.text_type
        assert args is None or type(args) in [list, tuple]
        assert kwargs is None or type(kwargs) == dict

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## only these request types can fail and hence be referenced from an ERROR
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated but currently carry no parsed attributes
        check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        return Error(request_type, request, error, args = args, kwargs = kwargs)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        msg = [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
        ## kwargs require args to be present too (possibly empty), per wire format
        if self.kwargs:
            msg.append(self.args)
            msg.append(self.kwargs)
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert type(request) in six.integer_types
        assert type(topic) == six.text_type
        assert args is None or type(args) in [list, tuple]
        assert kwargs is None or type(kwargs) == dict
        assert acknowledge is None or type(acknowledge) == bool
        assert excludeMe is None or type(excludeMe) == bool
        assert exclude is None or type(exclude) == list
        assert eligible is None or type(eligible) == list
        assert discloseMe is None or type(discloseMe) == bool

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## all options are optional; validate each one that is present
        acknowledge = None
        if u'acknowledge' in options:
            acknowledge = options[u'acknowledge']
            if type(acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(acknowledge)))

        excludeMe = None
        if u'exclude_me' in options:
            excludeMe = options[u'exclude_me']
            if type(excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(excludeMe)))

        exclude = None
        if u'exclude' in options:
            exclude = options[u'exclude']
            if type(exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(exclude)))
            for sessionId in exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))

        eligible = None
        if u'eligible' in options:
            eligible = options[u'eligible']
            if type(eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(eligible)))
            for sessionId in eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))

        discloseMe = None
        if u'disclose_me' in options:
            discloseMe = options[u'disclose_me']
            if type(discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(discloseMe)))

        return Publish(request,
                       topic,
                       args = args,
                       kwargs = kwargs,
                       acknowledge = acknowledge,
                       excludeMe = excludeMe,
                       exclude = exclude,
                       eligible = eligible,
                       discloseMe = discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only serialize options that were explicitly set
        options = {}
        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        msg = [Publish.MESSAGE_TYPE, self.request, options, self.topic]
        ## kwargs require args to be present too (possibly empty), per wire format
        if self.kwargs:
            msg.append(self.args)
            msg.append(self.kwargs)
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fmt = "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})"
        return fmt.format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert type(request) in six.integer_types
        assert type(publication) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert type(request) in six.integer_types
        assert type(topic) == six.text_type
        assert match is None or type(match) == six.text_type
        assert match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD]

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## 'match' option is optional and defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match = options[u'match']
            if type(match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match)))
            if match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match))

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        ## the default (exact) matching policy is not put on the wire
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert type(request) in six.integer_types

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
"""
A WAMP ``EVENT`` message.
Formats:
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 36
"""
The WAMP message code for this type of message.
"""
def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
"""
:param subscription: The subscription ID this event is dispatched under.
:type subscription: int
:param publication: The publication ID of the dispatched event.
:type publication: int
:param args: Positional values for application-defined exception.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined exception.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param publisher: If present, the WAMP session ID of the publisher of this event.
:type publisher: int or None
"""
assert(type(subscription) in six.integer_types)
assert(type(publication) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(publisher is None or type(publisher) in six.integer_types)
Message.__init__(self)
self.subscription = subscription
self.publication = publication
self.args = args
self.kwargs = kwargs
self.publisher = publisher
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
publisher = None
if u'publisher' in details:
detail_publisher = details[u'publisher']
if type(detail_publisher) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
publisher = detail_publisher
obj = Event(subscription,
publication,
args = args,
kwargs = kwargs,
publisher = publisher)
return obj
def marshal(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
    """
    details = {}
    if self.publisher is not None:
        details[u'publisher'] = self.publisher

    ## payload fields are trailing and optional: kwargs implies args position
    msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
    if self.kwargs:
        msg.extend([self.args, self.kwargs])
    elif self.args:
        msg.append(self.args)
    return msg
def __str__(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
    """
    rendered = "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})"
    return rendered.format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Wire formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 48

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional call arguments (must be serializable).
        :type args: list or tuple or None
        :param kwargs: Keyword call arguments (must be serializable).
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, the caller wants progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## optional call options
        timeout = None
        if u'timeout' in options:
            timeout = options[u'timeout']
            if type(timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(timeout)))
            if timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(timeout))

        receive_progress = None
        if u'receive_progress' in options:
            receive_progress = options[u'receive_progress']
            if type(receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(receive_progress)))

        discloseMe = None
        if u'disclose_me' in options:
            discloseMe = options[u'disclose_me']
            if type(discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(discloseMe)))

        return Call(request,
                    procedure,
                    args = args,
                    kwargs = kwargs,
                    timeout = timeout,
                    receive_progress = receive_progress,
                    discloseMe = discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        msg = [Call.MESSAGE_TYPE, self.request, options, self.procedure]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        rendered = "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})"
        return rendered.format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Wire format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 49

    ## Valid cancelation modes.
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        return Cancel(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: removed the stray extra quote that trailed the mode value
        ## in the rendered message ("mode = '{1}''" -> "mode = '{1}'").
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Wire formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 50

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional result payload (must be serializable).
        :type args: list or tuple or None
        :param kwargs: Keyword result payload (must be serializable).
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and
           subsequent results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## optional progressive-result flag carried in details
        progress = None
        if u'progress' in details:
            progress = details[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(progress)))

        return Result(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        msg = [Result.MESSAGE_TYPE, self.request, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        rendered = "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})"
        return rendered.format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Wire format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 64

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for partition_key in pkeys:
                assert(type(partition_key) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        ## optional registration options
        pkeys = None
        discloseCaller = None

        if u'pkeys' in options:
            pkeys = options[u'pkeys']
            if type(pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(pkeys)))
            for pk in pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

        if u'disclose_caller' in options:
            discloseCaller = options[u'disclose_caller']
            if type(discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(discloseCaller)))

        return Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        rendered = "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})"
        return rendered.format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Wire format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 65

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
        registration = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")

        return Registered(request, registration)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Wire format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 66

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        return Unregister(request, registration)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Wire format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 67

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: diagnostics previously said "UNREGISTER" (a different
            ## message type, code 66) — this class parses UNREGISTERED (67).
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        return Unregistered(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: was rendered as "WAMP UNREGISTER Message", mislabeling this type.
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Wire formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 68

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional call payload (must be serializable).
        :type args: list or tuple or None
        :param kwargs: Keyword call payload (must be serializable).
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## optional invocation details
        timeout = None
        if u'timeout' in details:
            timeout = details[u'timeout']
            if type(timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(timeout)))
            if timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(timeout))

        receive_progress = None
        if u'receive_progress' in details:
            receive_progress = details[u'receive_progress']
            if type(receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(receive_progress)))

        caller = None
        if u'caller' in details:
            caller = details[u'caller']
            if type(caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(caller)))

        authid = None
        if u'authid' in details:
            authid = details[u'authid']
            if type(authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(authid)))

        authrole = None
        if u'authrole' in details:
            authrole = details[u'authrole']
            if type(authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(authrole)))

        authmethod = None
        if u'authmethod' in details:
            authmethod = details[u'authmethod']
            if type(authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(authmethod)))

        return Invocation(request,
                          registration,
                          args = args,
                          kwargs = kwargs,
                          timeout = timeout,
                          receive_progress = receive_progress,
                          caller = caller,
                          authid = authid,
                          authrole = authrole,
                          authmethod = authmethod)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        msg = [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        rendered = "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})"
        return rendered.format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Wire format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 69

    ## Valid interruption modes.
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional interruption mode
        mode = None
        if u'mode' in options:
            mode = options[u'mode']
            if type(mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode)))
            if mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode))

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Wire formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 70

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional result payload (must be serializable).
        :type args: list or tuple or None
        :param kwargs: Keyword result payload (must be serializable).
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and
           subsequent results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        ## optional progressive-result flag
        progress = None
        if u'progress' in options:
            progress = options[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress)))

        return Yield(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        rendered = "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})"
        return rendered.format(self.request, self.args, self.kwargs, self.progress)
|
def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
    """
    :param session: The WAMP session ID the other peer is assigned.
    :type session: int
    :param roles: The WAMP roles to announce.
    :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
    :param authid: The authentication ID assigned.
    :type authid: unicode or None
    :param authrole: The authentication role assigned.
    :type authrole: unicode or None
    :param authmethod: The authentication method in use.
    :type authmethod: unicode or None
    :param authprovider: The authentication provider in use.
    :type authprovider: unicode or None
    """
    assert(type(session) in six.integer_types)
    assert(type(roles) == list)
    for announced_role in roles:
        assert(isinstance(announced_role, autobahn.wamp.role.RoleFeatures))
    ## all four auth attributes are optional unicode strings
    for auth_value in (authid, authrole, authmethod, authprovider):
        assert(auth_value is None or type(auth_value) == six.text_type)

    Message.__init__(self)
    self.session = session
    self.roles = roles
    self.authid = authid
    self.authrole = authrole
    self.authmethod = authmethod
    self.authprovider = authprovider
| 295
| 326
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Regular expressions for validating WAMP URIs.
##
## "strict" patterns restrict URI components to [0-9a-z_] with minimum
## component length 2; "loose" patterns allow any characters except
## whitespace, '.' and '#'. The "NON_EMPTY" variants additionally
## disallow empty URI components (i.e. consecutive dots).
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
   """
   Check that `value` is a valid WAMP URI under the loose rules (empty
   URI components allowed) and return it unchanged.

   :param value: The value to check.
   :param message: Context prefix for the error message on failure.
   :returns: The validated URI.
   :raises: :class:`autobahn.wamp.exception.ProtocolError` if `value` is
      not a unicode string or does not match the loose URI pattern.
   """
   if type(value) is not six.text_type:
      raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
   if _URI_PAT_LOOSE.match(value) is None:
      raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
   return value
def check_or_raise_id(value, message):
   """
   Check that `value` is a valid WAMP ID -- an integer in [0, 2**53] --
   and return it unchanged.

   :param value: The value to check.
   :param message: Context prefix for the error message on failure.
   :returns: The validated ID.
   :raises: :class:`autobahn.wamp.exception.ProtocolError` if `value` is
      not an integer or is out of range.
   """
   if type(value) not in six.integer_types:
      raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
   ## WAMP IDs must fit losslessly into an IEEE double: [0, 2**53]
   if not (0 <= value <= 9007199254740992):
      raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
   return value
def check_or_raise_extra(value, message):
   """
   Check that `value` is a valid WAMP extra/details/options dictionary
   (a dict whose keys are all unicode strings) and return it unchanged.

   :param value: The value to check.
   :param message: Context prefix for the error message on failure.
   :returns: The validated dictionary.
   :raises: :class:`autobahn.wamp.exception.ProtocolError` if `value` is
      not a dict or has a non-unicode key.
   """
   if type(value) != dict:
      raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
   for key in value:
      if type(key) != six.text_type:
         raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
   return value
class Message(util.EqualityMixin):
   """
   WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.
   .. note:: This is not supposed to be instantiated.
   """
   def __init__(self):
      ## per-serializer cache: maps ISerializer instances to the bytes
      ## produced by serializing this message with that serializer
      self._serialized = {}
   def uncache(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
      """
      ## drop all cached serializations (e.g. after mutating the message)
      self._serialized = {}
   def serialize(self, serializer):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
      """
      try:
         ## fast path: already serialized with this serializer
         return self._serialized[serializer]
      except KeyError:
         data = serializer.serialize(self.marshal())
         self._serialized[serializer] = data
         return data
IMessage.register(Message)
class Hello(Message):
   """
   A WAMP ``HELLO`` message.
   Format: ``[HELLO, Realm|uri, Details|dict]``
   """
   MESSAGE_TYPE = 1
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, realm, roles, authmethods = None, authid = None):
      """
      :param realm: The URI of the WAMP realm to join.
      :type realm: unicode
      :param roles: The WAMP roles to announce.
      :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
      :param authmethods: The authentication methods to announce.
      :type authmethods: list of unicode or None
      :param authid: The authentication ID to announce.
      :type authid: unicode or None
      """
      ## validate argument types eagerly (no-ops when running under -O)
      assert(type(realm) == six.text_type)
      assert(type(roles) == list)
      for role in roles:
         assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
      if authmethods:
         assert(type(authmethods) == list)
         for authmethod in authmethods:
            assert(type(authmethod) == six.text_type)
      assert(authid is None or type(authid) == six.text_type)
      Message.__init__(self)
      self.realm = realm
      self.roles = roles
      self.authmethods = authmethods
      self.authid = authid
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
      realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
      details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
      roles = []
      ## the 'roles' detail is mandatory and must announce at least one role
      if not u'roles' in details:
         raise ProtocolError("missing mandatory roles attribute in options in HELLO")
      details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
      if len(details_roles) == 0:
         raise ProtocolError("empty 'roles' in 'details' in HELLO")
      for role in details_roles:
         if role not in ROLE_NAME_TO_CLASS:
            raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
         role_cls = ROLE_NAME_TO_CLASS[role]
         details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
         if u'features' in details_role:
            check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
            ## FIXME: skip unknown attributes
            ## NOTE(review): an unknown feature key raises TypeError from the
            ## role constructor here rather than ProtocolError -- confirm intended
            role_features = role_cls(**details_role[u'features'])
         else:
            role_features = role_cls()
         roles.append(role_features)
      authmethods = None
      if u'authmethods' in details:
         details_authmethods = details[u'authmethods']
         if type(details_authmethods) != list:
            raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
         for auth_method in details_authmethods:
            if type(auth_method) != six.text_type:
               raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
         authmethods = details_authmethods
      authid = None
      if u'authid' in details:
         details_authid = details[u'authid']
         if type(details_authid) != six.text_type:
            raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
         authid = details_authid
      obj = Hello(realm, roles, authmethods, authid)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {u'roles': {}}
      for role in self.roles:
         details[u'roles'][role.ROLE] = {}
         for feature in role.__dict__:
            ## announce only features explicitly set (non-None), skipping
            ## private attributes and the ROLE marker itself
            if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
               if not u'features' in details[u'roles'][role.ROLE]:
                  details[u'roles'][role.ROLE] = {u'features': {}}
               details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
      ## optional auth details are omitted when unset (or empty)
      if self.authmethods:
         details[u'authmethods'] = self.authmethods
      if self.authid:
         details[u'authid'] = self.authid
      return [Hello.MESSAGE_TYPE, self.realm, details]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
   """
   A WAMP ``WELCOME`` message.
   Format: ``[WELCOME, Session|id, Details|dict]``
   """
   MESSAGE_TYPE = 2
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
      """
      :param session: The WAMP session ID the other peer is assigned.
      :type session: int
      :param roles: The WAMP roles to announce.
      :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
      :param authid: The authentication ID assigned.
      :type authid: unicode or None
      :param authrole: The authentication role assigned.
      :type authrole: unicode or None
      :param authmethod: The authentication method in use.
      :type authmethod: unicode or None
      :param authprovider: The authentication provider in use.
      :type authprovider: unicode or None
      """
      ## validate argument types eagerly (no-ops when running under -O)
      assert(type(session) in six.integer_types)
      assert(type(roles) == list)
      for role in roles:
         assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
      assert(authid is None or type(authid) == six.text_type)
      assert(authrole is None or type(authrole) == six.text_type)
      assert(authmethod is None or type(authmethod) == six.text_type)
      assert(authprovider is None or type(authprovider) == six.text_type)
      Message.__init__(self)
      self.session = session
      self.roles = roles
      self.authid = authid
      self.authrole = authrole
      self.authmethod = authmethod
      self.authprovider = authprovider
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
      session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
      details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
      ## NOTE(review): the auth details are passed through without type
      ## validation here -- only the constructor asserts (stripped under -O)
      ## will catch non-text values; confirm whether that is intended
      authid = details.get(u'authid', None)
      authrole = details.get(u'authrole', None)
      authmethod = details.get(u'authmethod', None)
      authprovider = details.get(u'authprovider', None)
      roles = []
      if not u'roles' in details:
         raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
      ## consistency: use the unicode key form like the membership test above
      ## (on Python 2, 'roles' and u'roles' compare equal, so behavior is
      ## unchanged)
      details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
      if len(details_roles) == 0:
         raise ProtocolError("empty 'roles' in 'details' in WELCOME")
      for role in details_roles:
         if role not in ROLE_NAME_TO_CLASS:
            raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
         role_cls = ROLE_NAME_TO_CLASS[role]
         if u'features' in details_roles[role]:
            check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            ## FIXME: skip unknown attributes
            role_features = role_cls(**details_roles[role][u'features'])
         else:
            role_features = role_cls()
         roles.append(role_features)
      obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {
         u'roles': {}
      }
      ## only auth details explicitly set (truthy) are put on the wire
      if self.authid:
         details[u'authid'] = self.authid
      if self.authrole:
         details[u'authrole'] = self.authrole
      ## fixed: this was guarded by `self.authrole`, which dropped the
      ## 'authmethod' detail whenever no authrole was set, and emitted
      ## authmethod = None when an authrole was set without an authmethod
      if self.authmethod:
         details[u'authmethod'] = self.authmethod
      if self.authprovider:
         details[u'authprovider'] = self.authprovider
      for role in self.roles:
         details[u'roles'][role.ROLE] = {}
         for feature in role.__dict__:
            ## announce only features explicitly set (non-None), skipping
            ## private attributes and the ROLE marker itself
            if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
               if not u'features' in details[u'roles'][role.ROLE]:
                  details[u'roles'][role.ROLE] = {u'features': {}}
               details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
      return [Welcome.MESSAGE_TYPE, self.session, details]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
   """
   A WAMP ``ABORT`` message.
   Format: ``[ABORT, Details|dict, Reason|uri]``
   """
   MESSAGE_TYPE = 3
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, reason, message = None):
      """
      :param reason: WAMP or application error URI for aborting reason.
      :type reason: unicode
      :param message: Optional human-readable closing message, e.g. for logging purposes.
      :type message: unicode or None
      """
      assert(type(reason) == six.text_type)
      assert(message is None or type(message) == six.text_type)
      Message.__init__(self)
      self.reason = reason
      self.message = message
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
      details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
      reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
      message = None
      if u'message' in details:
         msg = details[u'message']
         if type(msg) != six.text_type:
            raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(msg)))
         message = msg
      return Abort(reason, message)
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## a falsy (unset or empty) message is omitted from the details
      details = {u'message': self.message} if self.message else {}
      return [Abort.MESSAGE_TYPE, details, self.reason]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
   """
   A WAMP ``CHALLENGE`` message.
   Format: ``[CHALLENGE, Method|string, Extra|dict]``
   """
   MESSAGE_TYPE = 4
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, method, extra = None):
      """
      :param method: The authentication method.
      :type method: unicode
      :param extra: Authentication method specific information.
      :type extra: dict or None
      """
      assert(type(method) == six.text_type)
      assert(extra is None or type(extra) == dict)
      Message.__init__(self)
      self.method = method
      ## normalize an unset (falsy) extra to an empty dict
      self.extra = {} if not extra else extra
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
      method = wmsg[1]
      if type(method) != six.text_type:
         raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
      return Challenge(method, check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE"))
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Challenge.MESSAGE_TYPE, self.method, self.extra]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
   """
   A WAMP ``AUTHENTICATE`` message.
   Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
   """
   MESSAGE_TYPE = 5
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, signature, extra = None):
      """
      :param signature: The signature for the authentication challenge.
      :type signature: unicode
      :param extra: Authentication method specific information.
      :type extra: dict or None
      """
      assert(type(signature) == six.text_type)
      assert(extra is None or type(extra) == dict)
      Message.__init__(self)
      self.signature = signature
      ## normalize an unset (falsy) extra to an empty dict
      self.extra = {} if not extra else extra
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
      signature = wmsg[1]
      if type(signature) != six.text_type:
         raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
      return Authenticate(signature, check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE"))
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
   """
   A WAMP ``GOODBYE`` message.
   Format: ``[GOODBYE, Details|dict, Reason|uri]``
   """
   MESSAGE_TYPE = 6
   """
   The WAMP message code for this type of message.
   """
   DEFAULT_REASON = u"wamp.goodbye.normal"
   """
   Default WAMP closing reason.
   """
   def __init__(self, reason = DEFAULT_REASON, message = None):
      """
      :param reason: Optional WAMP or application error URI for closing reason.
      :type reason: unicode
      :param message: Optional human-readable closing message, e.g. for logging purposes.
      :type message: unicode or None
      """
      assert(type(reason) == six.text_type)
      assert(message is None or type(message) == six.text_type)
      Message.__init__(self)
      self.reason = reason
      self.message = message
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
      details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
      reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
      message = None
      if u'message' in details:
         msg = details[u'message']
         if type(msg) != six.text_type:
            raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(msg)))
         message = msg
      return Goodbye(reason, message)
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## a falsy (unset or empty) message is omitted from the details
      details = {u'message': self.message} if self.message else {}
      return [Goodbye.MESSAGE_TYPE, details, self.reason]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
   """
   A WAMP ``HEARTBEAT`` message.
   Formats:
   * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
   * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
   """
   MESSAGE_TYPE = 7
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, incoming, outgoing, discard = None):
      """
      :param incoming: Last incoming heartbeat processed from peer.
      :type incoming: int
      :param outgoing: Outgoing heartbeat.
      :type outgoing: int
      :param discard: Optional data that is discarded by peer.
      :type discard: unicode or None
      """
      ## validate argument types eagerly (no-ops when running under -O)
      assert(type(incoming) in six.integer_types)
      assert(type(outgoing) in six.integer_types)
      assert(discard is None or type(discard) == six.text_type)
      Message.__init__(self)
      self.incoming = incoming
      self.outgoing = outgoing
      self.discard = discard
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
      if len(wmsg) not in [3, 4]:
         raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
      incoming = wmsg[1]
      if type(incoming) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
      ## note the asymmetric validation: 'incoming' may be 0, while
      ## 'outgoing' must be strictly positive (presumably 'incoming' is 0
      ## when no peer heartbeat has been processed yet -- confirm)
      if incoming < 0: # must be non-negative
         raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
      outgoing = wmsg[2]
      if type(outgoing) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
      if outgoing <= 0: # must be positive
         raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
      discard = None
      if len(wmsg) > 3:
         discard = wmsg[3]
         if type(discard) != six.text_type:
            raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
      obj = Heartbeat(incoming, outgoing, discard = discard)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## the 3-element form is used when there is no discard payload
      if self.discard:
         return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
      else:
         return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
   """
   A WAMP ``ERROR`` message.
   Formats:
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 8
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request_type, request, error, args = None, kwargs = None):
      """
      :param request_type: The WAMP message type code for the original request.
      :type request_type: int
      :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
      :type request: int
      :param error: The WAMP or application error URI for the error that occurred.
      :type error: unicode
      :param args: Positional values for application-defined exception.
         Must be serializable using any serializers in use.
      :type args: list or None
      :param kwargs: Keyword values for application-defined exception.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      """
      ## validate argument types eagerly (no-ops when running under -O)
      assert(type(request_type) in six.integer_types)
      assert(type(request) in six.integer_types)
      assert(type(error) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      Message.__init__(self)
      self.request_type = request_type
      self.request = request
      self.error = error
      self.args = args
      self.kwargs = kwargs
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
      if len(wmsg) not in (5, 6, 7):
         raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
      request_type = wmsg[1]
      if type(request_type) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
      ## an ERROR may only refer to one of the "request"-style message types
      if request_type not in [Subscribe.MESSAGE_TYPE,
                              Unsubscribe.MESSAGE_TYPE,
                              Publish.MESSAGE_TYPE,
                              Register.MESSAGE_TYPE,
                              Unregister.MESSAGE_TYPE,
                              Call.MESSAGE_TYPE,
                              Invocation.MESSAGE_TYPE]:
         raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
      request = check_or_raise_id(wmsg[2], "'request' in ERROR")
      ## 'details' is validated for well-formedness but otherwise unused
      _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
      error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
      args = None
      if len(wmsg) > 5:
         args = wmsg[5]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
      kwargs = None
      if len(wmsg) > 6:
         kwargs = wmsg[6]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
      obj = Error(request_type, request, error, args = args, kwargs = kwargs)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## no error details are currently produced when sending
      details = {}
      ## empty (falsy) args/kwargs are treated as absent, selecting the
      ## shortest valid wire format
      if self.kwargs:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
      elif self.args:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
      else:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
   """
   A WAMP ``PUBLISH`` message.
   Formats:
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 16
   """
   The WAMP message code for this type of message.
   """
   def __init__(self,
                request,
                topic,
                args = None,
                kwargs = None,
                acknowledge = None,
                excludeMe = None,
                exclude = None,
                eligible = None,
                discloseMe = None):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param topic: The WAMP or application URI of the PubSub topic the event should
         be published to.
      :type topic: unicode
      :param args: Positional values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param acknowledge: If True, acknowledge the publication with a success or
         error response.
      :type acknowledge: bool or None
      :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
         if he is subscribed (and eligible).
      :type excludeMe: bool or None
      :param exclude: List of WAMP session IDs to exclude from receiving this event.
      :type exclude: list of int or None
      :param eligible: List of WAMP session IDs eligible to receive this event.
      :type eligible: list of int or None
      :param discloseMe: If True, request to disclose the publisher of this event
         to subscribers.
      :type discloseMe: bool or None
      """
      ## validate argument types eagerly (no-ops when running under -O)
      assert(type(request) in six.integer_types)
      assert(type(topic) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      assert(acknowledge is None or type(acknowledge) == bool)
      assert(excludeMe is None or type(excludeMe) == bool)
      assert(exclude is None or type(exclude) == list)
      assert(eligible is None or type(eligible) == list)
      assert(discloseMe is None or type(discloseMe) == bool)
      Message.__init__(self)
      self.request = request
      self.topic = topic
      self.args = args
      self.kwargs = kwargs
      self.acknowledge = acknowledge
      self.excludeMe = excludeMe
      self.exclude = exclude
      self.eligible = eligible
      self.discloseMe = discloseMe
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
      request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
      options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
      topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
      ## all publication options are optional; options not present on the
      ## wire remain None
      acknowledge = None
      excludeMe = None
      exclude = None
      eligible = None
      discloseMe = None
      if u'acknowledge' in options:
         option_acknowledge = options[u'acknowledge']
         if type(option_acknowledge) != bool:
            raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
         acknowledge = option_acknowledge
      if u'exclude_me' in options:
         option_excludeMe = options[u'exclude_me']
         if type(option_excludeMe) != bool:
            raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
         excludeMe = option_excludeMe
      if u'exclude' in options:
         option_exclude = options[u'exclude']
         if type(option_exclude) != list:
            raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
         for sessionId in option_exclude:
            if type(sessionId) not in six.integer_types:
               raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
         exclude = option_exclude
      if u'eligible' in options:
         option_eligible = options[u'eligible']
         if type(option_eligible) != list:
            raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
         for sessionId in option_eligible:
            if type(sessionId) not in six.integer_types:
               raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
         eligible = option_eligible
      if u'disclose_me' in options:
         option_discloseMe = options[u'disclose_me']
         if type(option_discloseMe) != bool:
            raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
         discloseMe = option_discloseMe
      obj = Publish(request,
                    topic,
                    args = args,
                    kwargs = kwargs,
                    acknowledge = acknowledge,
                    excludeMe = excludeMe,
                    exclude = exclude,
                    eligible = eligible,
                    discloseMe = discloseMe)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## only options explicitly set (non-None) are put on the wire
      options = {}
      if self.acknowledge is not None:
         options[u'acknowledge'] = self.acknowledge
      if self.excludeMe is not None:
         options[u'exclude_me'] = self.excludeMe
      if self.exclude is not None:
         options[u'exclude'] = self.exclude
      if self.eligible is not None:
         options[u'eligible'] = self.eligible
      if self.discloseMe is not None:
         options[u'disclose_me'] = self.discloseMe
      ## empty (falsy) args/kwargs are treated as absent, selecting the
      ## shortest valid wire format
      if self.kwargs:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
      elif self.args:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
      else:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
   """
   A WAMP ``PUBLISHED`` message.
   Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
   """
   MESSAGE_TYPE = 17
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request, publication):
      """
      :param request: The request ID of the original `PUBLISH` request.
      :type request: int
      :param publication: The publication ID for the published event.
      :type publication: int
      """
      assert(type(request) in six.integer_types)
      assert(type(publication) in six.integer_types)
      Message.__init__(self)
      self.request = request
      self.publication = publication
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` if the message is malformed.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
      return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                       check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Published.MESSAGE_TYPE, self.request, self.publication]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Subscribe` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        opts = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic_uri = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact unless overridden in options
        match = Subscribe.MATCH_EXACT
        if u'match' in opts:
            opt_match = opts[u'match']
            if type(opt_match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(opt_match)))
            if opt_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(opt_match))
            match = opt_match

        return Subscribe(req_id, topic_uri, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        opts = {}
        ## only wire a 'match' option for a non-default policy
        if self.match and self.match != Subscribe.MATCH_EXACT:
            opts[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, opts, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE`` request.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Subscribed` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
        sub_id = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")

        return Subscribed(req_id, sub_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## FIX: dropped spurious "WAMP " prefix so the error text is
            ## consistent with all other message classes in this module
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        obj = Unsubscribe(request, subscription)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE`` request.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Unsubscribed` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")

        return Unsubscribed(req_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message, dispatching a publication to a subscriber.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Event` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        sub_id = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        pub_id = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## optional disclosed publisher session ID
        publisher = None
        if u'publisher' in details:
            publisher_detail = details[u'publisher']
            if type(publisher_detail) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(publisher_detail)))
            publisher = publisher_detail

        return Event(sub_id, pub_id, args = args, kwargs = kwargs, publisher = publisher)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## trailing payload elements are only wired when present
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        if self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message, requesting invocation of a remote procedure.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Call` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in CALL")
        opts = check_or_raise_extra(wmsg[2], "'options' in CALL")
        proc_uri = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## optional call timeout (non-negative ms)
        timeout = None
        if u'timeout' in opts:
            opt_timeout = opts[u'timeout']
            if type(opt_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(opt_timeout)))
            if opt_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(opt_timeout))
            timeout = opt_timeout

        ## optional progressive-results flag
        receive_progress = None
        if u'receive_progress' in opts:
            opt_progress = opts[u'receive_progress']
            if type(opt_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(opt_progress)))
            receive_progress = opt_progress

        ## optional caller-disclosure flag
        discloseMe = None
        if u'disclose_me' in opts:
            opt_disclose = opts[u'disclose_me']
            if type(opt_disclose) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(opt_disclose)))
            discloseMe = opt_disclose

        return Call(req_id,
                    proc_uri,
                    args = args,
                    kwargs = kwargs,
                    timeout = timeout,
                    receive_progress = receive_progress,
                    discloseMe = discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        opts = {}
        if self.timeout is not None:
            opts[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            opts[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            opts[u'disclose_me'] = self.discloseMe

        ## trailing payload elements are only wired when present
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, opts, self.procedure, self.args, self.kwargs]
        if self.args:
            return [Call.MESSAGE_TYPE, self.request, opts, self.procedure, self.args]
        return [Call.MESSAGE_TYPE, self.request, opts, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## cancellation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`

        FIX: removed a stray doubled quote after the mode placeholder
        (previously rendered as ``mode = 'kill''``).
        """
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message, carrying a (possibly progressive) call result.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Result` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## optional progressive-result flag
        progress = None
        if u'progress' in details:
            progress_detail = details[u'progress']
            if type(progress_detail) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(progress_detail)))
            progress = progress_detail

        return Result(req_id, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## trailing payload elements are only wired when present
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        if self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message, registering an RPC endpoint with a dealer.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Register` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        opts = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        proc_uri = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## optional application partition keys
        if u'pkeys' in opts:
            opt_pkeys = opts[u'pkeys']
            if type(opt_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(opt_pkeys)))
            for pk in opt_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = opt_pkeys

        ## optional caller-disclosure flag
        if u'disclose_caller' in opts:
            opt_disclose = opts[u'disclose_caller']
            if type(opt_disclose) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(opt_disclose)))
            discloseCaller = opt_disclose

        return Register(req_id, proc_uri, pkeys = pkeys, discloseCaller = discloseCaller)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        opts = {}
        if self.pkeys is not None:
            opts[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            opts[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, opts, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, acknowledging a ``REGISTER`` request.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Registered` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        req_id = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
        reg_id = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")

        return Registered(req_id, reg_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## FIX: dropped spurious "WAMP " prefix so the error text is
            ## consistent with all other message classes in this module
            raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        obj = Unregister(request, registration)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: error previously named the wrong message type ("UNREGISTER")
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`

        FIX: previously reported "WAMP UNREGISTER Message" for an UNREGISTERED message.
        """
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
"""
A WAMP ``INVOCATION`` message.
Formats:
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 68
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
registration,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
caller = None,
authid = None,
authrole = None,
authmethod = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID of the endpoint to be invoked.
:type registration: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param timeout: If present, let the callee automatically cancels
the invocation after this ms.
:type timeout: int or None
:param receive_progress: Indicates if the callee should produce progressive results.
:type receive_progress: bool or None
:param caller: The WAMP session ID of the caller.
:type caller: int or None
:param authid: The authentication ID of the caller.
:type authid: unicode or None
:param authrole: The authentication role of the caller.
:type authrole: unicode or None
:param authmethod: The authentication method under which the caller was authenticated.
:type authmethod: unicode or None
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(caller is None or type(caller) in six.integer_types)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
Message.__init__(self)
self.request = request
self.registration = registration
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the ``INVOCATION``
           wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        ## wire format: [INVOCATION, Request|id, Registration|id, Details|dict]
        ## plus optional Arguments|list and ArgumentsKw|dict
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        ## each optional detail below is type- (and where applicable value-)
        ## checked before being accepted; absent details stay None
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.caller is not None:
options[u'caller'] = self.caller
if self.authid is not None:
options[u'authid'] = self.authid
if self.authrole is not None:
options[u'authrole'] = self.authrole
if self.authmethod is not None:
options[u'authmethod'] = self.authmethod
if self.kwargs:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
elif self.args:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
else:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two interrupt modes defined by WAMP
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the INTERRUPT wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
        ## optional 'mode' option: must be text and one of the known modes
        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate
        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the YIELD wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
        ## optional 'progress' flag marking a progressive (non-final) result
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress
        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress
        ## when kwargs are present, args is always included (possibly None) so
        ## the positional wire layout stays intact
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
parse
|
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## explicit public API of this module: the abstract `Message` base class plus
## one concrete class per WAMP message type
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
## NOTE(review): the trailing "?" still makes the final component (and hence
## the empty string) match -- presumably it should be mandatory; confirm.
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
## NOTE(review): same caveat as above -- the last component is optional here too.
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate *value* as a WAMP URI (loose pattern) and return it unchanged.

    :param value: The candidate URI.
    :param message: Context prefix used in raised error messages.
    :returns: The validated URI.
    :raises: ProtocolError, if *value* is not a text string or does not match
       the loose URI pattern.
    """
    if type(value) == six.text_type:
        if _URI_PAT_LOOSE.match(value):
            return value
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
def check_or_raise_id(value, message):
    """
    Validate *value* as a WAMP ID and return it unchanged.

    WAMP IDs are integers in the range ``[0, 2**53]``.

    :param value: The candidate ID.
    :param message: Context prefix used in raised error messages.
    :returns: The validated ID.
    :raises: ProtocolError, if *value* is not an integer or out of range.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    if not (0 <= value <= 9007199254740992):  # upper bound is 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate *value* as a WAMP extra/details/options dictionary (all keys must
    be text strings) and return it unchanged.

    :param value: The candidate dictionary.
    :param message: Context prefix used in raised error messages.
    :returns: The validated dictionary.
    :raises: ProtocolError, if *value* is not a dict or has a non-text key.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`

        Drops all cached serializations of this message (call after mutating
        the message so stale bytes are not reused).
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`

        The result is cached per serializer instance, so repeated calls with
        the same serializer do not re-serialize the message.
        """
        ## only serialize if not cached ..
        if not serializer in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]


## register Message as a (virtual) implementation of the IMessage ABC
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the HELLO wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        ## 'roles' is the only mandatory detail: map announced role names to
        ## their feature classes via ROLE_NAME_TO_CLASS
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        ## optional 'authmethods': list of announced authentication method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        ## optional 'authid'
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only features explicitly set (non-None) on the role
            ## object; the u'features' sub-dict is created lazily on first hit
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
"""
A WAMP ``WELCOME`` message.
Format: ``[WELCOME, Session|id, Details|dict]``
"""
MESSAGE_TYPE = 2
"""
The WAMP message code for this type of message.
"""
    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        ## type-check all arguments before touching any state
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider
# MASKED: parse function (lines 329-383)
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {
u'roles': {}
}
if self.authid:
details[u'authid'] = self.authid
if self.authrole:
details[u'authrole'] = self.authrole
if self.authrole:
details[u'authmethod'] = self.authmethod
if self.authprovider:
details[u'authprovider'] = self.authprovider
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
return [Welcome.MESSAGE_TYPE, self.session, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the ABORT wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
            message = details_message
        obj = Abort(reason, message)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize None to an empty dict so .extra is always a dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the CHALLENGE wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize None to an empty dict so .extra is always a dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the AUTHENTICATE wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
        obj = Authenticate(signature, extra)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the GOODBYE wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
            message = details_message
        obj = Goodbye(reason, message)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the HEARTBEAT wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
        ## note the asymmetry: 'incoming' may be 0 (nothing processed yet),
        ## while 'outgoing' must be strictly positive
        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
        ## optional padding data, ignored by the receiver
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
        obj = Heartbeat(incoming, outgoing, discard = discard)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if *wmsg* does not conform to the ERROR wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
        ## 'request_type' must be one of the message types that can fail
        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## 'details' is validated but currently carries no parsed information
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
        ## optional positional payload
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
        obj = Error(request_type, request, error, args = args, kwargs = kwargs)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no details are currently emitted for ERROR
        details = {}
        ## when kwargs are present, args is always included (possibly None) so
        ## the positional wire layout stays intact
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        ## options are tri-state: None means "option absent" and is not
        ## serialized in marshal(), so the router applies its own default.
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request          # WAMP request ID
        self.topic = topic              # topic URI to publish to
        self.args = args                # positional payload (or None)
        self.kwargs = kwargs            # keyword payload (or None)
        self.acknowledge = acknowledge  # request PUBLISHED acknowledgement
        self.excludeMe = excludeMe      # exclude publisher from receivers
        self.exclude = exclude          # session ID blacklist
        self.eligible = eligible        # session ID whitelist
        self.discloseMe = discloseMe    # disclose publisher to subscribers

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## each option defaults to None (absent) unless present in the
        ## options dict AND of the correct type — otherwise we raise.
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        ## only explicitly-set (non-None) options get serialized
        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## NOTE(review): if kwargs is truthy while args is None, this
        ## serializes None in the Arguments slot, which parse() would
        ## reject ("invalid type for 'args'") — confirm callers always
        ## pass args together with kwargs.
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message — the broker's acknowledgement of a ``PUBLISH``.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert type(request) in six.integer_types
        assert type(publication) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by the serializer
        assert len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message — a subscriber's request to subscribe to a topic.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## supported topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert type(request) in six.integer_types
        assert type(topic) == six.text_type
        assert match is None or type(match) == six.text_type
        assert match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD]

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by the serializer
        assert len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## an absent 'match' option implies exact topic matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            option_match = options[u'match']
            if type(option_match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
            if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
            match = option_match

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        ## only a non-default matching policy is put on the wire
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message — the broker's acknowledgement of a ``SUBSCRIBE``.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by the serializer
        assert len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message — a subscriber's request to drop a subscription.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by the serializer
        assert len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message — the broker's acknowledgement of an ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert type(request) in six.integer_types

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by the serializer
        assert len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription  # subscription ID the event is dispatched under
        self.publication = publication    # publication ID of the event
        self.args = args                  # positional payload (or None)
        self.kwargs = kwargs              # keyword payload (or None)
        self.publisher = publisher        # disclosed publisher session ID (or None)

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        ## optional positional payload (5th element)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        ## optional keyword payload (6th element)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## optional disclosed publisher session ID in details
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## NOTE(review): if kwargs is truthy while args is None, None ends up
        ## in the Arguments slot, which parse() would reject — confirm callers
        ## always supply args together with kwargs.
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## options are tri-state: None means "option absent" and is not
        ## serialized in marshal().
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request                    # WAMP request ID
        self.procedure = procedure                # procedure URI to call
        self.args = args                          # positional call arguments (or None)
        self.kwargs = kwargs                      # keyword call arguments (or None)
        self.timeout = timeout                    # auto-cancel timeout in ms (or None)
        self.receive_progress = receive_progress  # request progressive results
        self.discloseMe = discloseMe              # disclose caller to callee

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## 'timeout' option: non-negative integer (ms)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout

        ## 'receive_progress' option: bool
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress

        ## 'disclose_me' option: bool
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        ## only explicitly-set (non-None) options get serialized
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## NOTE(review): if kwargs is truthy while args is None, None is
        ## serialized in the Arguments slot, which parse() would reject —
        ## confirm callers always supply args together with kwargs.
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message — a caller's request to cancel a pending ``CALL``.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
           ``None`` means "no mode option" and nothing is serialized.
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request  # request ID of the CALL being canceled
        self.mode = mode        # cancelation mode (or None)

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: format string previously contained a stray doubled quote
        ## ("...'{1}'')") which rendered e.g. "mode = 'kill'')"
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request    # request ID of the original CALL
        self.args = args          # positional result payload (or None)
        self.kwargs = kwargs      # keyword result payload (or None)
        self.progress = progress  # True for a progressive (non-final) result

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## 'progress' detail: bool flag for progressive results
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## NOTE(review): if kwargs is truthy while args is None, None is
        ## serialized in the Arguments slot, which parse() would reject —
        ## confirm callers always supply args together with kwargs.
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
"""
A WAMP ``REGISTER`` message.
Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
"""
MESSAGE_TYPE = 64
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param procedure: The WAMP or application URI of the RPC endpoint provided.
:type procedure: unicode
:param pkeys: The endpoint can work for this list of application partition keys.
:type pkeys: list of int or None
:param discloseCaller: If ``True``, the (registering) callee requests to disclose
the identity of callers whenever called.
:type discloseCaller: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(procedure) == six.text_type)
assert(pkeys is None or type(pkeys) == list)
if pkeys:
for k in pkeys:
assert(type(k) in six.integer_types)
assert(discloseCaller is None or type(discloseCaller) == bool)
Message.__init__(self)
self.request = request
self.procedure = procedure
self.pkeys = pkeys
self.discloseCaller = discloseCaller
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
pkeys = None
discloseCaller = None
if u'pkeys' in options:
option_pkeys = options[u'pkeys']
if type(option_pkeys) != list:
raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
for pk in option_pkeys:
if type(pk) not in six.integer_types:
raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
pkeys = option_pkeys
if u'disclose_caller' in options:
option_discloseCaller = options[u'disclose_caller']
if type(option_discloseCaller) != bool:
raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
discloseCaller = option_discloseCaller
obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.pkeys is not None:
options[u'pkeys'] = self.pkeys
if self.discloseCaller is not None:
options[u'disclose_caller'] = self.discloseCaller
return [Register.MESSAGE_TYPE, self.request, options, self.procedure]
def __str__(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
    """
    fields = (self.request, self.procedure, self.pkeys, self.discloseCaller)
    return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(*fields)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the REGISTERED format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        ## IDs are validated in message-element order
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fields = (self.request, self.registration)
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(*fields)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the UNREGISTER format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        ## IDs are validated in message-element order
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fields = (self.request, self.registration)
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(*fields)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the UNREGISTERED format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: the error text previously (wrongly) said "UNREGISTER"
            ## although this is an UNREGISTERED message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        return Unregistered(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: previously rendered as "WAMP UNREGISTER Message"
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the INVOCATION format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        ## args and kwargs are optional trailing elements
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        ## optional positional payload (5th element)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        ## optional keyword payload (6th element)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## optional 'timeout' detail: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))

            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))

            timeout = detail_timeout

        ## optional 'receive_progress' detail: bool
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress

        ## optional 'caller' detail: session ID of the caller (caller disclosure)
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller

        ## optional 'authid' detail: unicode
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid

        ## optional 'authrole' detail: unicode
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole

        ## optional 'authmethod' detail: unicode
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include options which were actually set
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.caller is not None:
            options[u'caller'] = self.caller

        if self.authid is not None:
            options[u'authid'] = self.authid

        if self.authrole is not None:
            options[u'authrole'] = self.authrole

        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## kwargs can only be transmitted if args is also present
        ## (wire format has no way to express kwargs without args)
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two defined interruption modes
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the INTERRUPT format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional 'mode' option: must be one of the defined mode constants
        mode = None
        if u'mode' in options:
            mode = options[u'mode']
            if type(mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode)))
            if mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode))

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {} if self.mode is None else {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the YIELD format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional trailing payload elements: args (list), then kwargs (dict)
        payload = wmsg[3:]
        args = None
        kwargs = None
        if len(payload) > 0:
            args = payload[0]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        if len(payload) > 1:
            kwargs = payload[1]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        ## optional 'progress' option: bool
        progress = None
        if u'progress' in options:
            progress = options[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress)))

        return Yield(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## kwargs can only be transmitted together with args
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fields = (self.request, self.args, self.kwargs, self.progress)
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(*fields)
|
## NOTE(review): this looks like a stray module-level duplicate of
## Welcome.parse (defined inside the Welcome class further below),
## presumably left behind by a bad merge/paste — confirm and consider
## removing. Code is kept byte-identical here.
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.

    :param wmsg: The unserialized raw message.
    :type wmsg: list

    :returns: An instance of this class.
    """
    ## this should already be verified by WampSerializer.unserialize
    ##
    assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

    if len(wmsg) != 3:
        raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

    session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
    details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

    ## optional authentication details (no type validation here)
    authid = details.get(u'authid', None)
    authrole = details.get(u'authrole', None)
    authmethod = details.get(u'authmethod', None)
    authprovider = details.get(u'authprovider', None)

    roles = []
    if not u'roles' in details:
        raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

    details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")

    if len(details_roles) == 0:
        raise ProtocolError("empty 'roles' in 'details' in WELCOME")

    for role in details_roles:
        if role not in ROLE_NAME_TO_CLASS:
            raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

        role_cls = ROLE_NAME_TO_CLASS[role]

        if u'features' in details_roles[role]:
            check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            ## FIXME: skip unknown attributes
            role_features = role_cls(**details_roles[role][u'features'])

        else:
            role_features = role_cls()

        roles.append(role_features)

    obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

    return obj
| 329
| 383
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check that `value` is a (loose) WAMP URI and return it, raising a
    ProtocolError (prefixed with `message`) otherwise.
    """
    if type(value) == six.text_type:
        if _URI_PAT_LOOSE.match(value):
            return value
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
def check_or_raise_id(value, message):
    """
    Check that `value` is a valid WAMP ID and return it, raising a
    ProtocolError (prefixed with `message`) otherwise.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs are non-negative and limited to the JS safe-integer range
    if not (0 <= value <= 9007199254740992):  # 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check that `value` is a dict with text keys and return it, raising a
    ProtocolError (prefixed with `message`) otherwise.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: maps ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations (e.g. after mutating the message)
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily, caching per serializer
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the HELLO format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## 'roles' detail is mandatory: maps role name -> role options,
        ## each role resolved to a RoleFeatures subclass via ROLE_NAME_TO_CLASS
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        ## optional 'authmethods' detail: list of unicode
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        ## optional 'authid' detail: unicode
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce every non-private, non-None feature attribute of the role
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    ## lazily create the 'features' sub-dict on first feature found
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the WELCOME format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## optional authentication details (no type validation here —
        ## values are taken verbatim from the details dict)
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        ## 'roles' detail is mandatory: maps role name -> role options
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid

        if self.authrole:
            details[u'authrole'] = self.authrole

        ## FIX: this was guarded by "if self.authrole:" (copy-paste bug),
        ## which dropped 'authmethod' when no authrole was set and could
        ## emit authmethod = None when authrole was set
        if self.authmethod:
            details[u'authmethod'] = self.authmethod

        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce every non-private, non-None feature attribute of the role
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    ## lazily create the 'features' sub-dict on first feature found
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the ABORT format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional 'message' detail: unicode
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(message)))

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize: never store None, always a dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError, if the message does not conform to the CHALLENGE format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        return Challenge(method, check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message, sent in response to a ``CHALLENGE``.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message, used for orderly session close.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## the optional human-readable message travels inside the details dict
        message = None
        if u'message' in details:
            msg = details[u'message']
            if type(msg) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(msg)))
            message = msg

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only emit a 'message' detail when one is actually set
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        ## incoming counter must be non-negative (0 == nothing processed yet)
        if incoming < 0:
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        ## outgoing counter must be strictly positive
        if outgoing <= 0:
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message, reporting failure of a prior request.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: For invalid message length, types or values.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            ## FIX: report the offending *type* (was formatting the raw value),
            ## consistent with every other "invalid type" error in this module
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(type(request_type)))

        ## ERROR may only refer to request message types that can actually fail
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated but currently carry no defined information
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        return Error(request_type, request, error, args = args, kwargs = kwargs)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no error details are currently defined, hence always empty
        details = {}

        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## validate and extract the (all optional) publish options

        acknowledge = None
        if u'acknowledge' in options:
            acknowledge = options[u'acknowledge']
            if type(acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(acknowledge)))

        excludeMe = None
        if u'exclude_me' in options:
            excludeMe = options[u'exclude_me']
            if type(excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(excludeMe)))

        exclude = None
        if u'exclude' in options:
            exclude = options[u'exclude']
            if type(exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(exclude)))
            for sid in exclude:
                if type(sid) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sid)))

        eligible = None
        if u'eligible' in options:
            eligible = options[u'eligible']
            if type(eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(eligible)))
            for sid in eligible:
                if type(sid) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sid)))

        discloseMe = None
        if u'disclose_me' in options:
            discloseMe = options[u'disclose_me']
            if type(discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(discloseMe)))

        return Publish(request,
                       topic,
                       args = args,
                       kwargs = kwargs,
                       acknowledge = acknowledge,
                       excludeMe = excludeMe,
                       exclude = exclude,
                       eligible = eligible,
                       discloseMe = discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options that were explicitly set go on the wire
        options = {}
        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        msg = [Publish.MESSAGE_TYPE, self.request, options, self.topic]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a ``PUBLISH``.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
        publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")

        return Published(request, publication)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match = options[u'match']
            if type(match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match)))
            if match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match))

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        ## only a non-default matching policy is put on the wire
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE``.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")

        return Subscribed(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        return Unsubscribe(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")

        return Unsubscribed(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message, dispatching a published event to a subscriber.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## optional disclosure of the publishing session
        publisher = None
        if u'publisher' in details:
            publisher = details[u'publisher']
            if type(publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(publisher)))

        return Event(subscription,
                     publication,
                     args = args,
                     kwargs = kwargs,
                     publisher = publisher)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## sanity-check argument types (asserts are debug-only; wire-level
        ## validation happens in parse())
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            ## NOTE: only list is accepted on the wire, although the
            ## constructor also allows tuples for locally created messages
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include options actually set, to keep the wire format minimal
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        ## shortest valid list form: args/kwargs are only appended when non-empty
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## permissible cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode
        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: stray extra quote after the mode value ("'{1}''" -> "'{1}'"),
        ## now consistent with Interrupt.__str__
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        ## sanity-check argument types (debug-only; wire validation is in parse())
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            ## only list is accepted on the wire (constructor also allows tuples)
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress
        obj = Result(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress
        ## shortest valid list form: args/kwargs appended only when non-empty
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        ## sanity-check argument types (debug-only; wire validation is in parse())
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        pkeys = None
        discloseCaller = None
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            ## every partition key must be an integer
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller
        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include options actually set, to keep the wire format minimal
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a ``Registered`` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = %s, registration = %s)" % (self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an ``Unregister`` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = %s, registration = %s)" % (self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
        if len(wmsg) != 2:
            ## fixed: this message is UNREGISTERED, not UNREGISTER
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: mislabeled message type in the string representation
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        ## sanity-check argument types (debug-only; wire validation is in parse())
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            ## only list is accepted on the wire (constructor also allows tuples)
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            ## timeout is in milliseconds and must be non-negative
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include details actually set, to keep the wire format minimal
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod
        ## shortest valid list form: args/kwargs appended only when non-empty
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## permissible interruption modes (note: unlike CANCEL, "skip" is not valid here)
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))
            if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))
            mode = option_mode
        obj = Interrupt(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        ## sanity-check argument types (debug-only; wire validation is in parse())
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            ## only list is accepted on the wire (constructor also allows tuples)
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress
        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress
        ## shortest valid list form: args/kwargs appended only when non-empty
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
__init__
|
:param reason: WAMP or application error URI for aborting reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Precompiled URI validation patterns. "strict" restricts URI components to
## lowercase alphanumerics/underscore of length >= 2; "loose" only excludes
## whitespace, '.' and '#'. The "NON_EMPTY" variants additionally reject URIs
## containing empty components (e.g. "com..myapp").

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check a value for being a valid (loosely checked) WAMP URI and return it.

    :param value: The candidate URI.
    :param message: Context prefix used in raised error messages.
    :returns: The validated value, unchanged.
    :raises: :class:`autobahn.wamp.exception.ProtocolError` if invalid.
    """
    ## exact type check (not isinstance) — subclasses of text are rejected
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value):
        return value
    raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
def check_or_raise_id(value, message):
    """
    Check a value for being a valid WAMP ID and return it.

    :param value: The candidate ID.
    :param message: Context prefix used in raised error messages.
    :returns: The validated value, unchanged.
    :raises: :class:`autobahn.wamp.exception.ProtocolError` if invalid.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs live in [0, 2**53] so they survive JSON/JavaScript doubles
    if 0 <= value <= 9007199254740992:
        return value
    raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
def check_or_raise_extra(value, message):
    """
    Check a value for being a valid WAMP extra (a dict with text keys) and return it.

    :param value: The candidate extra (options/details dict).
    :param message: Context prefix used in raised error messages.
    :returns: The validated value, unchanged.
    :raises: :class:`autobahn.wamp.exception.ProtocolError` if invalid.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    ## every key must be a text string
    for k in value:
        if type(k) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-serializer cache of the serialized wire bytes for this message
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations (e.g. after the message was mutated)
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and memoize per serializer instance
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError if the message is malformed. The order of the
           checks below is deliberate - it determines which error is reported
           first for multiply-invalid messages.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## 'roles' is mandatory and must announce at least one role
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            ## instantiate the role's feature class from the announced
            ## features dict (keyword arguments), or with defaults if absent
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)

        ## optional: list of announced authentication methods
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods

        ## optional: announced authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce public, non-default (non-None) features;
                ## the 'features' sub-dict is created lazily on first hit
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## all four authentication details are optional
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        ## 'roles' is mandatory and must announce at least one role
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        ## consistency: use the u'roles' key form used everywhere else in this module
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            ## instantiate the role's feature class from the announced
            ## features dict, or with defaults when absent
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## bug fix: this previously tested `self.authrole`, so 'authmethod'
        ## was silently dropped whenever no authrole was set (and an
        ## 'authmethod': None entry could be emitted when authrole was set
        ## without an authmethod)
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce public, non-default (non-None) features
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
            message = details_message

        obj = Abort(reason, message)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## sanity check - message type already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        _, method, raw_extra = wmsg

        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        return Challenge(method, check_or_raise_extra(raw_extra, "'extra' in CHALLENGE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## sanity check - message type already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        _, signature, raw_extra = wmsg

        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        return Authenticate(signature, check_or_raise_extra(raw_extra, "'extra' in AUTHENTICATE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## sanity check - message type already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(message)))

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError if the message is malformed. Note the
           asymmetric value constraints - incoming may be 0, outgoing may not.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        ## the trailing 'discard' element is optional
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only emit the 4-element form when a discard payload is present
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## only requests that can fail may be referenced by an ERROR
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## 'details' is validated as a dict, but its contents are currently ignored
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        ## optional positional and keyword payload
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## 'details' is part of the wire format but currently always empty here
        details = {}

        ## kwargs can only be transmitted in the 7-element form, which also
        ## carries args (possibly None if only kwargs were given)
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError if the message is malformed. Options are
           validated in a fixed order, which determines which error is
           reported first for multiply-invalid messages.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        ## optional positional and keyword event payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## all publication options are optional; None means "not given"
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        ## 'exclude' and 'eligible' must be lists of session IDs (integers)
        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options that were explicitly set (non-None) go on the wire
        options = {}
        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## kwargs can only be sent in the 6-element form, which also carries
        ## args (possibly None if only kwargs were given)
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## sanity check - message type already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## sanity check - message type already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact when not given in options;
        ## the default trivially passes both validations below
        match = options.get(u'match', Subscribe.MATCH_EXACT)
        if type(match) != six.text_type:
            raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match)))
        if match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
            raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match))

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only send 'match' when it deviates from the default policy
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options = {u'match': self.match}
        else:
            options = {}
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## sanity check - message type already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message, sent by a subscriber to drop a subscription.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
        # both payload elements are WAMP IDs; validate and construct in one go
        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, the router's acknowledgement of an ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message, dispatched by a broker to subscribers of a topic.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
        # 4, 5 or 6 elements depending on whether args and/or kwargs are present
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
        # optional positional payload (5th element)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
        # optional keyword payload (6th element)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
        # optional publisher disclosure, carried inside the Details dict
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher
        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher
        # NOTE: args/kwargs are included only when truthy, so an empty payload
        # list/dict marshals the same as no payload (shortest valid format).
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message, sent by a caller to invoke a remote procedure.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
        # 4, 5 or 6 elements depending on whether args and/or kwargs are present
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
        # optional positional payload (5th element)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
        # optional keyword payload (6th element)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
        # each recognized option is validated individually; unknown options are ignored
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options which are set get serialized into the Options dict
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        # NOTE: args/kwargs are included only when truthy, so an empty payload
        # list/dict marshals the same as no payload (shortest valid format).
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message, sent by a caller to cancel a pending ``CALL``.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    # valid cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode
        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        # FIX: removed stray quote in the format string, which previously
        # rendered as e.g. "mode = 'kill'')" instead of "mode = 'kill')".
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message, the (possibly progressive) outcome of a ``CALL``.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined result payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined result payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
        # 3, 4 or 5 elements depending on whether args and/or kwargs are present
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
        # optional positional payload (4th element)
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
        # optional keyword payload (5th element)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
        # optional progressive-result flag, carried inside the Details dict
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress
        obj = Result(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress
        # NOTE: args/kwargs are included only when truthy, so an empty payload
        # list/dict marshals the same as no payload (shortest valid format).
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message, sent by a callee to offer a procedure endpoint.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            # every partition key must itself be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        # each recognized option is validated individually; unknown options are ignored
        pkeys = None
        discloseCaller = None
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller
        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options which are set get serialized into the Options dict
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, the router's acknowledgement of a ``REGISTER``.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
        # both payload elements are WAMP IDs; validate and construct in one go
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message, sent by a callee to withdraw a registration.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
        # both payload elements are WAMP IDs; validate and construct in one go
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message, the router's acknowledgement of an ``UNREGISTER``.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
        # FIX: error/diagnostic messages previously said "UNREGISTER"; this
        # class handles UNREGISTERED (cf. Unsubscribed, which correctly names
        # itself in its messages).
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message, sent by a dealer to the callee providing an endpoint.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        # 4, 5 or 6 elements depending on whether args and/or kwargs are present
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        # optional positional payload (5th element)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        # optional keyword payload (6th element)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        # each recognized detail is validated individually; unknown details are ignored
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        # caller identity disclosure details (present only if the dealer discloses the caller)
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only details which are set get serialized into the Details dict
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod
        # NOTE: args/kwargs are included only when truthy, so an empty payload
        # list/dict marshals the same as no payload (shortest valid format).
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message, sent by a dealer to abort a running invocation.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    # valid interrupt modes
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
        # the (optional) 'mode' option must be one of the known mode strings
        mode = None
        if u'mode' in options:
            mode = options[u'mode']
            if type(mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode)))
            if mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode))
        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
   """
   A WAMP ``YIELD`` message.

   Formats:

   * ``[YIELD, INVOCATION.Request|id, Options|dict]``
   * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
   * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
   """

   MESSAGE_TYPE = 70
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, args = None, kwargs = None, progress = None):
      """
      :param request: The WAMP request ID of the original call.
      :type request: int
      :param args: Positional values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param progress: If ``True``, this result is a progressive invocation result, and
         subsequent results (or a final error) will follow.
      :type progress: bool or None
      """
      assert type(request) in six.integer_types
      assert args is None or type(args) in [list, tuple]
      assert kwargs is None or type(kwargs) == dict
      assert progress is None or type(progress) == bool
      Message.__init__(self)
      self.request = request
      self.args = args
      self.kwargs = kwargs
      self.progress = progress

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE
      if len(wmsg) not in (3, 4, 5):
         raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
      request = check_or_raise_id(wmsg[1], "'request' in YIELD")
      options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
      ## optional positional payload
      args = None
      if len(wmsg) > 3:
         args = wmsg[3]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
      ## optional keyword payload
      kwargs = None
      if len(wmsg) > 4:
         kwargs = wmsg[4]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
      ## optional 'progress' flag, must be bool when present
      progress = None
      if u'progress' in options:
         progress_value = options[u'progress']
         if type(progress_value) != bool:
            raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress_value)))
         progress = progress_value
      return Yield(request, args = args, kwargs = kwargs, progress = progress)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      options = {}
      if self.progress is not None:
         options[u'progress'] = self.progress
      msg = [Yield.MESSAGE_TYPE, self.request, options]
      if self.kwargs:
         msg.append(self.args)
         msg.append(self.kwargs)
      elif self.args:
         msg.append(self.args)
      return msg

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
def __init__(self, reason, message = None):
"""
:param reason: WAMP or application error URI for aborting reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
| 437
| 450
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## Public names exported by this module: the WAMP message base class plus one
## class per WAMP message type defined below.
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## URI validation patterns. "strict" requires each dot-separated component to
## match [0-9a-z_]{2,}; "loose" only forbids whitespace, '.' and '#' inside a
## component. The *_NON_EMPTY variants do not accept empty components.
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
   """
   Check a value for being a valid WAMP URI (loose check, empty components
   allowed) and return it unchanged.

   :param value: The value to check.
   :param message: Context string prefixed to any error raised.
   :returns: The validated value.
   :raises: :class:`autobahn.wamp.exception.ProtocolError` on invalid type or value.
   """
   if type(value) != six.text_type:
      raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
   if _URI_PAT_LOOSE.match(value) is None:
      raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
   return value
def check_or_raise_id(value, message):
   """
   Check a value for being a valid WAMP ID (an integer in [0, 2**53]) and
   return it unchanged.

   :param value: The value to check.
   :param message: Context string prefixed to any error raised.
   :returns: The validated value.
   :raises: :class:`autobahn.wamp.exception.ProtocolError` on invalid type or value.
   """
   if type(value) not in six.integer_types:
      raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
   if not (0 <= value <= 9007199254740992): # 2**53
      raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
   return value
def check_or_raise_extra(value, message):
   """
   Check a value for being a valid WAMP extra/details/options dictionary
   (a ``dict`` with only unicode keys) and return it unchanged.

   :param value: The value to check.
   :param message: Context string prefixed to any error raised.
   :returns: The validated value.
   :raises: :class:`autobahn.wamp.exception.ProtocolError` on invalid type.
   """
   if type(value) != dict:
      raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
   for key in value.keys():
      if type(key) != six.text_type:
         raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
   return value
class Message(util.EqualityMixin):
   """
   WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

   .. note:: This is not supposed to be instantiated.
   """

   def __init__(self):
      ## per-serializer memoization of the serialized byte representation
      self._serialized = {}

   def uncache(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
      """
      ## drop all cached serializations
      self._serialized = {}

   def serialize(self, serializer):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
      """
      ## serialize lazily, memoized per serializer instance
      if serializer not in self._serialized:
         self._serialized[serializer] = serializer.serialize(self.marshal())
      return self._serialized[serializer]


IMessage.register(Message)
class Hello(Message):
   """
   A WAMP ``HELLO`` message.
   Format: ``[HELLO, Realm|uri, Details|dict]``
   """
   MESSAGE_TYPE = 1
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, realm, roles, authmethods = None, authid = None):
      """
      :param realm: The URI of the WAMP realm to join.
      :type realm: unicode
      :param roles: The WAMP roles to announce.
      :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
      :param authmethods: The authentication methods to announce.
      :type authmethods: list of unicode or None
      :param authid: The authentication ID to announce.
      :type authid: unicode or None
      """
      assert(type(realm) == six.text_type)
      assert(type(roles) == list)
      for role in roles:
         assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
      if authmethods:
         assert(type(authmethods) == list)
         for authmethod in authmethods:
            assert(type(authmethod) == six.text_type)
      assert(authid is None or type(authid) == six.text_type)
      Message.__init__(self)
      self.realm = realm
      self.roles = roles
      self.authmethods = authmethods
      self.authid = authid
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
      realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
      details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
      roles = []
      ## 'roles' detail is mandatory and must be a non-empty dict mapping
      ## known role names to (possibly empty) feature dicts
      if not u'roles' in details:
         raise ProtocolError("missing mandatory roles attribute in options in HELLO")
      details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
      if len(details_roles) == 0:
         raise ProtocolError("empty 'roles' in 'details' in HELLO")
      for role in details_roles:
         if role not in ROLE_NAME_TO_CLASS:
            raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
         role_cls = ROLE_NAME_TO_CLASS[role]
         details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
         if u'features' in details_role:
            check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
            ## FIXME: skip unknown attributes
            role_features = role_cls(**details_role[u'features'])
         else:
            role_features = role_cls()
         roles.append(role_features)
      ## optional 'authmethods' detail: a list of unicode method names
      authmethods = None
      if u'authmethods' in details:
         details_authmethods = details[u'authmethods']
         if type(details_authmethods) != list:
            raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
         for auth_method in details_authmethods:
            if type(auth_method) != six.text_type:
               raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
         authmethods = details_authmethods
      ## optional 'authid' detail: a unicode authentication ID
      authid = None
      if u'authid' in details:
         details_authid = details[u'authid']
         if type(details_authid) != six.text_type:
            raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
         authid = details_authid
      obj = Hello(realm, roles, authmethods, authid)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {u'roles': {}}
      for role in self.roles:
         details[u'roles'][role.ROLE] = {}
         ## announce only features explicitly set (non-None) on the role object
         for feature in role.__dict__:
            if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
               if not u'features' in details[u'roles'][role.ROLE]:
                  details[u'roles'][role.ROLE] = {u'features': {}}
               details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
      if self.authmethods:
         details[u'authmethods'] = self.authmethods
      if self.authid:
         details[u'authid'] = self.authid
      return [Hello.MESSAGE_TYPE, self.realm, details]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
   """
   A WAMP ``WELCOME`` message.

   Format: ``[WELCOME, Session|id, Details|dict]``
   """

   MESSAGE_TYPE = 2
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
      """
      :param session: The WAMP session ID the other peer is assigned.
      :type session: int
      :param roles: The WAMP roles to announce.
      :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
      :param authid: The authentication ID assigned.
      :type authid: unicode or None
      :param authrole: The authentication role assigned.
      :type authrole: unicode or None
      :param authmethod: The authentication method in use.
      :type authmethod: unicode or None
      :param authprovider: The authentication provider in use.
      :type authprovider: unicode or None
      """
      assert(type(session) in six.integer_types)
      assert(type(roles) == list)
      for role in roles:
         assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
      assert(authid is None or type(authid) == six.text_type)
      assert(authrole is None or type(authrole) == six.text_type)
      assert(authmethod is None or type(authmethod) == six.text_type)
      assert(authprovider is None or type(authprovider) == six.text_type)
      Message.__init__(self)
      self.session = session
      self.roles = roles
      self.authid = authid
      self.authrole = authrole
      self.authmethod = authmethod
      self.authprovider = authprovider

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
      session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
      details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
      ## authentication details are all optional
      authid = details.get(u'authid', None)
      authrole = details.get(u'authrole', None)
      authmethod = details.get(u'authmethod', None)
      authprovider = details.get(u'authprovider', None)
      roles = []
      if not u'roles' in details:
         raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
      ## FIX: use the unicode key literal consistently (was a bytes literal 'roles')
      details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
      if len(details_roles) == 0:
         raise ProtocolError("empty 'roles' in 'details' in WELCOME")
      for role in details_roles:
         if role not in ROLE_NAME_TO_CLASS:
            raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
         role_cls = ROLE_NAME_TO_CLASS[role]
         if u'features' in details_roles[role]:
            check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            ## FIXME: skip unknown attributes
            role_features = role_cls(**details_roles[role][u'features'])
         else:
            role_features = role_cls()
         roles.append(role_features)
      obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
      return obj

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {
         u'roles': {}
      }
      if self.authid:
         details[u'authid'] = self.authid
      if self.authrole:
         details[u'authrole'] = self.authrole
      ## FIX: previously guarded on self.authrole (copy-paste bug), which dropped
      ## 'authmethod' when no role was set and emitted a None 'authmethod' when
      ## a role was set but no method
      if self.authmethod:
         details[u'authmethod'] = self.authmethod
      if self.authprovider:
         details[u'authprovider'] = self.authprovider
      for role in self.roles:
         details[u'roles'][role.ROLE] = {}
         ## announce only features explicitly set (non-None) on the role object
         for feature in role.__dict__:
            if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
               if not u'features' in details[u'roles'][role.ROLE]:
                  details[u'roles'][role.ROLE] = {u'features': {}}
               details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
      return [Welcome.MESSAGE_TYPE, self.session, details]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
   """
   A WAMP ``ABORT`` message.

   Format: ``[ABORT, Details|dict, Reason|uri]``
   """

   MESSAGE_TYPE = 3
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, reason, message = None):
      """
      :param reason: WAMP or application error URI for aborting reason.
      :type reason: unicode
      :param message: Optional human-readable closing message, e.g. for logging purposes.
      :type message: unicode or None
      """
      assert type(reason) == six.text_type
      assert message is None or type(message) == six.text_type
      Message.__init__(self)
      self.reason = reason
      self.message = message

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
      details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
      reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
      ## optional human-readable message detail
      message = None
      if u'message' in details:
         message_value = details[u'message']
         if type(message_value) != six.text_type:
            raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(message_value)))
         message = message_value
      return Abort(reason, message)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {u'message': self.message} if self.message else {}
      return [Abort.MESSAGE_TYPE, details, self.reason]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
   """
   A WAMP ``CHALLENGE`` message.

   Format: ``[CHALLENGE, Method|string, Extra|dict]``
   """

   MESSAGE_TYPE = 4
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, method, extra = None):
      """
      :param method: The authentication method.
      :type method: unicode
      :param extra: Authentication method specific information.
      :type extra: dict or None
      """
      assert type(method) == six.text_type
      assert extra is None or type(extra) == dict
      Message.__init__(self)
      self.method = method
      ## normalize missing extra to an empty dict
      self.extra = extra or {}

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
      method = wmsg[1]
      if type(method) != six.text_type:
         raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
      extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
      return Challenge(method, extra)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Challenge.MESSAGE_TYPE, self.method, self.extra]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
   """
   A WAMP ``AUTHENTICATE`` message.

   Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
   """

   MESSAGE_TYPE = 5
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, signature, extra = None):
      """
      :param signature: The signature for the authentication challenge.
      :type signature: unicode
      :param extra: Authentication method specific information.
      :type extra: dict or None
      """
      assert type(signature) == six.text_type
      assert extra is None or type(extra) == dict
      Message.__init__(self)
      self.signature = signature
      ## normalize missing extra to an empty dict
      self.extra = extra or {}

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
      signature = wmsg[1]
      if type(signature) != six.text_type:
         raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
      extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
      return Authenticate(signature, extra)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
   """
   A WAMP ``GOODBYE`` message.

   Format: ``[GOODBYE, Details|dict, Reason|uri]``
   """

   MESSAGE_TYPE = 6
   """
   The WAMP message code for this type of message.
   """

   DEFAULT_REASON = u"wamp.goodbye.normal"
   """
   Default WAMP closing reason.
   """

   def __init__(self, reason = DEFAULT_REASON, message = None):
      """
      :param reason: Optional WAMP or application error URI for closing reason.
      :type reason: unicode
      :param message: Optional human-readable closing message, e.g. for logging purposes.
      :type message: unicode or None
      """
      assert type(reason) == six.text_type
      assert message is None or type(message) == six.text_type
      Message.__init__(self)
      self.reason = reason
      self.message = message

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
      details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
      reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
      ## optional human-readable message detail
      message = None
      if u'message' in details:
         message_value = details[u'message']
         if type(message_value) != six.text_type:
            raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(message_value)))
         message = message_value
      return Goodbye(reason, message)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {u'message': self.message} if self.message else {}
      return [Goodbye.MESSAGE_TYPE, details, self.reason]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
   """
   A WAMP ``HEARTBEAT`` message.

   Formats:

   * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
   * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
   """

   MESSAGE_TYPE = 7
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, incoming, outgoing, discard = None):
      """
      :param incoming: Last incoming heartbeat processed from peer.
      :type incoming: int
      :param outgoing: Outgoing heartbeat.
      :type outgoing: int
      :param discard: Optional data that is discarded by peer.
      :type discard: unicode or None
      """
      assert type(incoming) in six.integer_types
      assert type(outgoing) in six.integer_types
      assert discard is None or type(discard) == six.text_type
      Message.__init__(self)
      self.incoming = incoming
      self.outgoing = outgoing
      self.discard = discard

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE
      if len(wmsg) not in [3, 4]:
         raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
      incoming = wmsg[1]
      if type(incoming) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
      if incoming < 0: # must be non-negative
         raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
      outgoing = wmsg[2]
      if type(outgoing) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
      if outgoing <= 0: # must be positive
         raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
      ## optional discard payload, must be unicode when present
      discard = None
      if len(wmsg) > 3:
         discard = wmsg[3]
         if type(discard) != six.text_type:
            raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
      return Heartbeat(incoming, outgoing, discard = discard)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
      if self.discard:
         msg.append(self.discard)
      return msg

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      discard_len = len(self.discard) if self.discard else None
      return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, discard_len)
class Error(Message):
   """
   A WAMP ``ERROR`` message.
   Formats:
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 8
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request_type, request, error, args = None, kwargs = None):
      """
      :param request_type: The WAMP message type code for the original request.
      :type request_type: int
      :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
      :type request: int
      :param error: The WAMP or application error URI for the error that occurred.
      :type error: unicode
      :param args: Positional values for application-defined exception.
         Must be serializable using any serializers in use.
      :type args: list or None
      :param kwargs: Keyword values for application-defined exception.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      """
      assert(type(request_type) in six.integer_types)
      assert(type(request) in six.integer_types)
      assert(type(error) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      Message.__init__(self)
      self.request_type = request_type
      self.request = request
      self.error = error
      self.args = args
      self.kwargs = kwargs
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
      if len(wmsg) not in (5, 6, 7):
         raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
      request_type = wmsg[1]
      if type(request_type) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
      ## only these request message types may be answered with an ERROR
      if request_type not in [Subscribe.MESSAGE_TYPE,
                              Unsubscribe.MESSAGE_TYPE,
                              Publish.MESSAGE_TYPE,
                              Register.MESSAGE_TYPE,
                              Unregister.MESSAGE_TYPE,
                              Call.MESSAGE_TYPE,
                              Invocation.MESSAGE_TYPE]:
         raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
      request = check_or_raise_id(wmsg[2], "'request' in ERROR")
      ## details are validated for well-formedness but otherwise unused here
      _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
      error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
      ## optional positional payload
      args = None
      if len(wmsg) > 5:
         args = wmsg[5]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
      ## optional keyword payload
      kwargs = None
      if len(wmsg) > 6:
         kwargs = wmsg[6]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
      obj = Error(request_type, request, error, args = args, kwargs = kwargs)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## no details are currently emitted; payload members are trailing optionals
      details = {}
      if self.kwargs:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
      elif self.args:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
      else:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
"""
A WAMP ``PUBLISH`` message.
Formats:
* ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 16
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
topic,
args = None,
kwargs = None,
acknowledge = None,
excludeMe = None,
exclude = None,
eligible = None,
discloseMe = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param topic: The WAMP or application URI of the PubSub topic the event should
be published to.
:type topic: unicode
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param acknowledge: If True, acknowledge the publication with a success or
error response.
:type acknowledge: bool or None
:param excludeMe: If ``True``, exclude the publisher from receiving the event, even
if he is subscribed (and eligible).
:type excludeMe: bool or None
:param exclude: List of WAMP session IDs to exclude from receiving this event.
:type exclude: list of int or None
:param eligible: List of WAMP session IDs eligible to receive this event.
:type eligible: list of int or None
:param discloseMe: If True, request to disclose the publisher of this event
to subscribers.
:type discloseMe: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(acknowledge is None or type(acknowledge) == bool)
assert(excludeMe is None or type(excludeMe) == bool)
assert(exclude is None or type(exclude) == list)
assert(eligible is None or type(eligible) == list)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.topic = topic
self.args = args
self.kwargs = kwargs
self.acknowledge = acknowledge
self.excludeMe = excludeMe
self.exclude = exclude
self.eligible = eligible
self.discloseMe = discloseMe
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
acknowledge = None
excludeMe = None
exclude = None
eligible = None
discloseMe = None
if u'acknowledge' in options:
option_acknowledge = options[u'acknowledge']
if type(option_acknowledge) != bool:
raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
acknowledge = option_acknowledge
if u'exclude_me' in options:
option_excludeMe = options[u'exclude_me']
if type(option_excludeMe) != bool:
raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
excludeMe = option_excludeMe
if u'exclude' in options:
option_exclude = options[u'exclude']
if type(option_exclude) != list:
raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
for sessionId in option_exclude:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
exclude = option_exclude
if u'eligible' in options:
option_eligible = options[u'eligible']
if type(option_eligible) != list:
raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
for sessionId in option_eligible:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
eligible = option_eligible
if u'disclose_me' in options:
option_discloseMe = options[u'disclose_me']
if type(option_discloseMe) != bool:
raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
discloseMe = option_discloseMe
obj = Publish(request,
topic,
args = args,
kwargs = kwargs,
acknowledge = acknowledge,
excludeMe = excludeMe,
exclude = exclude,
eligible = eligible,
discloseMe = discloseMe)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.acknowledge is not None:
options[u'acknowledge'] = self.acknowledge
if self.excludeMe is not None:
options[u'exclude_me'] = self.excludeMe
if self.exclude is not None:
options[u'exclude'] = self.exclude
if self.eligible is not None:
options[u'eligible'] = self.eligible
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
elif self.args:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
else:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
   """
   A WAMP ``PUBLISHED`` message.

   Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
   """

   MESSAGE_TYPE = 17
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, publication):
      """
      :param request: The request ID of the original `PUBLISH` request.
      :type request: int
      :param publication: The publication ID for the published event.
      :type publication: int
      """
      assert type(request) in six.integer_types
      assert type(publication) in six.integer_types

      Message.__init__(self)
      self.request = request
      self.publication = publication

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

      return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                       check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Published.MESSAGE_TYPE, self.request, self.publication]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
   """
   A WAMP ``SUBSCRIBE`` message.

   Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
   """

   MESSAGE_TYPE = 32
   """
   The WAMP message code for this type of message.
   """

   ## supported topic matching policies
   MATCH_EXACT = u'exact'
   MATCH_PREFIX = u'prefix'
   MATCH_WILDCARD = u'wildcard'

   def __init__(self, request, topic, match = MATCH_EXACT):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
      :type topic: unicode
      :param match: The topic matching method to be used for the subscription.
      :type match: unicode
      """
      assert type(request) in six.integer_types
      assert type(topic) == six.text_type
      assert match is None or type(match) == six.text_type
      assert match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD]

      Message.__init__(self)
      self.request = request
      self.topic = topic
      self.match = match

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE

      if len(wmsg) != 4:
         raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
      options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
      topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

      ## default to exact matching unless a valid 'match' option was supplied
      match = Subscribe.MATCH_EXACT
      if u'match' in options:
         val = options[u'match']
         if type(val) != six.text_type:
            raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(val)))
         if val not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
            raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(val))
         match = val

      return Subscribe(request, topic, match)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## 'exact' is the protocol default, so it is never sent explicitly
      options = {}
      if self.match and self.match != Subscribe.MATCH_EXACT:
         options[u'match'] = self.match
      return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
   """
   A WAMP ``SUBSCRIBED`` message.

   Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
   """

   MESSAGE_TYPE = 33
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, subscription):
      """
      :param request: The request ID of the original ``SUBSCRIBE`` request.
      :type request: int
      :param subscription: The subscription ID for the subscribed topic (or topic pattern).
      :type subscription: int
      """
      assert type(request) in six.integer_types
      assert type(subscription) in six.integer_types

      Message.__init__(self)
      self.request = request
      self.subscription = subscription

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

      return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                        check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
   """
   A WAMP ``UNSUBSCRIBE`` message.

   Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
   """

   MESSAGE_TYPE = 34
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, subscription):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param subscription: The subscription ID for the subscription to unsubscribe from.
      :type subscription: int
      """
      assert(type(request) in six.integer_types)
      assert(type(subscription) in six.integer_types)

      Message.__init__(self)
      self.request = request
      self.subscription = subscription

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

      if len(wmsg) != 3:
         ## FIX: error message said "for WAMP UNSUBSCRIBE"; all sibling message
         ## classes use the bare message name ("for PUBLISH", "for SUBSCRIBED", ...)
         raise ProtocolError("invalid message length {0} for UNSUBSCRIBE".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
      subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

      obj = Unsubscribe(request, subscription)

      return obj

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
   """
   A WAMP ``UNSUBSCRIBED`` message.

   Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
   """

   MESSAGE_TYPE = 35
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request):
      """
      :param request: The request ID of the original ``UNSUBSCRIBE`` request.
      :type request: int
      """
      assert type(request) in six.integer_types

      Message.__init__(self)
      self.request = request

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE

      if len(wmsg) != 2:
         raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

      return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Unsubscribed.MESSAGE_TYPE, self.request]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
   """
   A WAMP ``EVENT`` message.

   Formats:

   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
   """

   MESSAGE_TYPE = 36
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
      """
      :param subscription: The subscription ID this event is dispatched under.
      :type subscription: int
      :param publication: The publication ID of the dispatched event.
      :type publication: int
      :param args: Positional values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param publisher: If present, the WAMP session ID of the publisher of this event.
      :type publisher: int or None
      """
      assert type(subscription) in six.integer_types
      assert type(publication) in six.integer_types
      assert args is None or type(args) in [list, tuple]
      assert kwargs is None or type(kwargs) == dict
      assert publisher is None or type(publisher) in six.integer_types

      Message.__init__(self)
      self.subscription = subscription
      self.publication = publication
      self.args = args
      self.kwargs = kwargs
      self.publisher = publisher

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE

      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

      subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
      publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
      details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

      ## optional positional and keyword payload
      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

      publisher = None
      if u'publisher' in details:
         val = details[u'publisher']
         if type(val) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(val)))
         publisher = val

      return Event(subscription,
                   publication,
                   args = args,
                   kwargs = kwargs,
                   publisher = publisher)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {}
      if self.publisher is not None:
         details[u'publisher'] = self.publisher

      msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
      ## kwargs requires args to be present on the wire (even if empty)
      if self.kwargs:
         msg.append(self.args)
         msg.append(self.kwargs)
      elif self.args:
         msg.append(self.args)
      return msg

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      fields = (self.subscription, self.publication, self.args, self.kwargs, self.publisher)
      return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(*fields)
class Call(Message):
   """
   A WAMP ``CALL`` message.

   Formats:

   * ``[CALL, Request|id, Options|dict, Procedure|uri]``
   * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
   * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
   """

   MESSAGE_TYPE = 48
   """
   The WAMP message code for this type of message.
   """

   def __init__(self,
                request,
                procedure,
                args = None,
                kwargs = None,
                timeout = None,
                receive_progress = None,
                discloseMe = None):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param procedure: The WAMP or application URI of the procedure which should be called.
      :type procedure: unicode
      :param args: Positional values for application-defined call arguments.
         Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined call arguments.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param timeout: If present, let the callee automatically cancel
         the call after this ms.
      :type timeout: int or None
      :param receive_progress: If ``True``, indicates that the caller wants to receive
         progressive call results.
      :type receive_progress: bool or None
      :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
      :type discloseMe: bool or None
      """
      assert type(request) in six.integer_types
      assert type(procedure) == six.text_type
      assert args is None or type(args) in [list, tuple]
      assert kwargs is None or type(kwargs) == dict
      assert timeout is None or type(timeout) in six.integer_types
      assert receive_progress is None or type(receive_progress) == bool
      assert discloseMe is None or type(discloseMe) == bool

      Message.__init__(self)
      self.request = request
      self.procedure = procedure
      self.args = args
      self.kwargs = kwargs
      self.timeout = timeout
      self.receive_progress = receive_progress
      self.discloseMe = discloseMe

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE

      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in CALL")
      options = check_or_raise_extra(wmsg[2], "'options' in CALL")
      procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

      ## optional positional and keyword call arguments
      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

      timeout = None
      if u'timeout' in options:
         val = options[u'timeout']
         if type(val) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(val)))
         if val < 0:
            raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(val))
         timeout = val

      receive_progress = None
      if u'receive_progress' in options:
         val = options[u'receive_progress']
         if type(val) != bool:
            raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(val)))
         receive_progress = val

      discloseMe = None
      if u'disclose_me' in options:
         val = options[u'disclose_me']
         if type(val) != bool:
            raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(val)))
         discloseMe = val

      return Call(request,
                  procedure,
                  args = args,
                  kwargs = kwargs,
                  timeout = timeout,
                  receive_progress = receive_progress,
                  discloseMe = discloseMe)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## only options actually set are put on the wire
      options = {}
      for key, value in ((u'timeout', self.timeout),
                         (u'receive_progress', self.receive_progress),
                         (u'disclose_me', self.discloseMe)):
         if value is not None:
            options[key] = value

      msg = [Call.MESSAGE_TYPE, self.request, options, self.procedure]
      ## kwargs requires args to be present on the wire (even if empty)
      if self.kwargs:
         msg.append(self.args)
         msg.append(self.kwargs)
      elif self.args:
         msg.append(self.args)
      return msg

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      fields = (self.request, self.procedure, self.args, self.kwargs,
                self.timeout, self.receive_progress, self.discloseMe)
      return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(*fields)
class Cancel(Message):
   """
   A WAMP ``CANCEL`` message.

   Format: ``[CANCEL, CALL.Request|id, Options|dict]``
   """

   MESSAGE_TYPE = 49
   """
   The WAMP message code for this type of message.
   """

   ## supported cancellation modes
   SKIP = u'skip'
   ABORT = u'abort'
   KILL = u'kill'

   def __init__(self, request, mode = None):
      """
      :param request: The WAMP request ID of the original `CALL` to cancel.
      :type request: int
      :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
      :type mode: unicode or None
      """
      assert(type(request) in six.integer_types)
      assert(mode is None or type(mode) == six.text_type)
      assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

      Message.__init__(self)
      self.request = request
      self.mode = mode

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
      options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

      ## options
      ##
      mode = None

      if u'mode' in options:
         option_mode = options[u'mode']
         if type(option_mode) != six.text_type:
            raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))

         if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
            raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))

         mode = option_mode

      obj = Cancel(request, mode = mode)

      return obj

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      options = {}

      if self.mode is not None:
         options[u'mode'] = self.mode

      return [Cancel.MESSAGE_TYPE, self.request, options]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      ## FIX: format string previously ended with a stray quote ("mode = '{1}'')"),
      ## producing a doubled quote in the rendered message
      return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
   """
   A WAMP ``RESULT`` message.

   Formats:

   * ``[RESULT, CALL.Request|id, Details|dict]``
   * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
   * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
   """

   MESSAGE_TYPE = 50
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, args = None, kwargs = None, progress = None):
      """
      :param request: The request ID of the original `CALL` request.
      :type request: int
      :param args: Positional values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param progress: If ``True``, this result is a progressive call result, and subsequent
         results (or a final error) will follow.
      :type progress: bool or None
      """
      assert type(request) in six.integer_types
      assert args is None or type(args) in [list, tuple]
      assert kwargs is None or type(kwargs) == dict
      assert progress is None or type(progress) == bool

      Message.__init__(self)
      self.request = request
      self.args = args
      self.kwargs = kwargs
      self.progress = progress

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE

      if len(wmsg) not in (3, 4, 5):
         raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in RESULT")
      details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

      ## optional positional and keyword payload
      args = None
      if len(wmsg) > 3:
         args = wmsg[3]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

      kwargs = None
      if len(wmsg) > 4:
         kwargs = wmsg[4]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

      progress = None
      if u'progress' in details:
         val = details[u'progress']
         if type(val) != bool:
            raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(val)))
         progress = val

      return Result(request, args = args, kwargs = kwargs, progress = progress)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {}
      if self.progress is not None:
         details[u'progress'] = self.progress

      msg = [Result.MESSAGE_TYPE, self.request, details]
      ## kwargs requires args to be present on the wire (even if empty)
      if self.kwargs:
         msg.append(self.args)
         msg.append(self.kwargs)
      elif self.args:
         msg.append(self.args)
      return msg

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
   """
   A WAMP ``REGISTER`` message.

   Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
   """

   MESSAGE_TYPE = 64
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param procedure: The WAMP or application URI of the RPC endpoint provided.
      :type procedure: unicode
      :param pkeys: The endpoint can work for this list of application partition keys.
      :type pkeys: list of int or None
      :param discloseCaller: If ``True``, the (registering) callee requests to disclose
         the identity of callers whenever called.
      :type discloseCaller: bool or None
      """
      assert type(request) in six.integer_types
      assert type(procedure) == six.text_type
      assert pkeys is None or type(pkeys) == list
      if pkeys:
         for pkey in pkeys:
            assert type(pkey) in six.integer_types
      assert discloseCaller is None or type(discloseCaller) == bool

      Message.__init__(self)
      self.request = request
      self.procedure = procedure
      self.pkeys = pkeys
      self.discloseCaller = discloseCaller

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE

      if len(wmsg) != 4:
         raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
      options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
      procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

      pkeys = None
      discloseCaller = None

      if u'pkeys' in options:
         val = options[u'pkeys']
         if type(val) != list:
            raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(val)))
         for pk in val:
            if type(pk) not in six.integer_types:
               raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
         pkeys = val

      if u'disclose_caller' in options:
         val = options[u'disclose_caller']
         if type(val) != bool:
            raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(val)))
         discloseCaller = val

      return Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## only options actually set are put on the wire
      options = {}
      if self.pkeys is not None:
         options[u'pkeys'] = self.pkeys
      if self.discloseCaller is not None:
         options[u'disclose_caller'] = self.discloseCaller

      return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      fields = (self.request, self.procedure, self.pkeys, self.discloseCaller)
      return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(*fields)
class Registered(Message):
   """
   A WAMP ``REGISTERED`` message.

   Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
   """

   MESSAGE_TYPE = 65
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, registration):
      """
      :param request: The request ID of the original ``REGISTER`` request.
      :type request: int
      :param registration: The registration ID for the registered procedure (or procedure pattern).
      :type registration: int
      """
      assert type(request) in six.integer_types
      assert type(registration) in six.integer_types

      Message.__init__(self)
      self.request = request
      self.registration = registration

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## the message code was already checked by WampSerializer.unserialize
      assert len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

      return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                        check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Registered.MESSAGE_TYPE, self.request, self.registration]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
   """
   A WAMP `UNREGISTER` message.

   Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
   """

   MESSAGE_TYPE = 66
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, registration):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param registration: The registration ID for the registration to unregister.
      :type registration: int
      """
      assert(type(request) in six.integer_types)
      assert(type(registration) in six.integer_types)

      Message.__init__(self)
      self.request = request
      self.registration = registration

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

      if len(wmsg) != 3:
         ## FIX: error message said "for WAMP UNREGISTER"; all sibling message
         ## classes use the bare message name ("for PUBLISH", "for REGISTERED", ...)
         raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
      registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

      obj = Unregister(request, registration)

      return obj

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Unregister.MESSAGE_TYPE, self.request, self.registration]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        ## fix: error messages previously said "UNREGISTER" -- this is the UNREGISTERED message
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        return Unregistered(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`

        :returns: The serializable raw message as a list.
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fix: previously printed "WAMP UNREGISTER Message" for an UNREGISTERED message
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
"""
A WAMP ``INVOCATION`` message, sent from a dealer to a callee to invoke a registered endpoint.
Formats:
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 68
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
registration,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
caller = None,
authid = None,
authrole = None,
authmethod = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID of the endpoint to be invoked.
:type registration: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param timeout: If present, let the callee automatically cancels
the invocation after this ms.
:type timeout: int or None
:param receive_progress: Indicates if the callee should produce progressive results.
:type receive_progress: bool or None
:param caller: The WAMP session ID of the caller.
:type caller: int or None
:param authid: The authentication ID of the caller.
:type authid: unicode or None
:param authrole: The authentication role of the caller.
:type authrole: unicode or None
:param authmethod: The authentication method under which the caller was authenticated.
:type authmethod: unicode or None
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(caller is None or type(caller) in six.integer_types)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
Message.__init__(self)
self.request = request
self.registration = registration
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
## optional 'timeout' detail: auto-cancel period in ms, must be a non-negative integer
timeout = None
if u'timeout' in details:
detail_timeout = details[u'timeout']
if type(detail_timeout) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
if detail_timeout < 0:
raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
timeout = detail_timeout
receive_progress = None
if u'receive_progress' in details:
detail_receive_progress = details[u'receive_progress']
if type(detail_receive_progress) != bool:
raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
receive_progress = detail_receive_progress
caller = None
if u'caller' in details:
detail_caller = details[u'caller']
if type(detail_caller) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
caller = detail_caller
authid = None
if u'authid' in details:
detail_authid = details[u'authid']
if type(detail_authid) != six.text_type:
raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
authid = detail_authid
authrole = None
if u'authrole' in details:
detail_authrole = details[u'authrole']
if type(detail_authrole) != six.text_type:
raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
authrole = detail_authrole
authmethod = None
if u'authmethod' in details:
detail_authmethod = details[u'authmethod']
if type(detail_authmethod) != six.text_type:
raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
authmethod = detail_authmethod
obj = Invocation(request,
registration,
args = args,
kwargs = kwargs,
timeout = timeout,
receive_progress = receive_progress,
caller = caller,
authid = authid,
authrole = authrole,
authmethod = authmethod)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
## only include optional details that were explicitly set
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.caller is not None:
options[u'caller'] = self.caller
if self.authid is not None:
options[u'authid'] = self.authid
if self.authrole is not None:
options[u'authrole'] = self.authrole
if self.authmethod is not None:
options[u'authmethod'] = self.authmethod
## NOTE(review): if kwargs is non-empty while args is None, a None 'args' slot is
## marshaled into the wire message -- confirm this is intended
if self.kwargs:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
elif self.args:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
else:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
"""
A WAMP ``INTERRUPT`` message, sent from a dealer to a callee to stop a running invocation.
Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
"""
MESSAGE_TYPE = 69
"""
The WAMP message code for this type of message.
"""
## permissible values for the 'mode' option
ABORT = u'abort'
KILL = u'kill'
def __init__(self, request, mode = None):
"""
:param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
:type request: int
:param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
:type mode: unicode or None
"""
assert(type(request) in six.integer_types)
assert(mode is None or type(mode) == six.text_type)
assert(mode is None or mode in [self.ABORT, self.KILL])
Message.__init__(self)
self.request = request
self.mode = mode
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
## options
##
## optional 'mode' option: must be one of the ABORT/KILL constants if present
mode = None
if u'mode' in options:
option_mode = options[u'mode']
if type(option_mode) != six.text_type:
raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))
if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))
mode = option_mode
obj = Interrupt(request, mode = mode)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.mode is not None:
options[u'mode'] = self.mode
return [Interrupt.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
"""
A WAMP ``YIELD`` message, sent by a callee to return the (possibly partial) result of an invocation.
Formats:
* ``[YIELD, INVOCATION.Request|id, Options|dict]``
* ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
* ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 70
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, args = None, kwargs = None, progress = None):
"""
:param request: The WAMP request ID of the original call.
:type request: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param progress: If ``True``, this result is a progressive invocation result, and subsequent
results (or a final error) will follow.
:type progress: bool or None
"""
assert(type(request) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(progress is None or type(progress) == bool)
Message.__init__(self)
self.request = request
self.args = args
self.kwargs = kwargs
self.progress = progress
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
if len(wmsg) not in (3, 4, 5):
raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in YIELD")
options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
args = None
if len(wmsg) > 3:
args = wmsg[3]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
kwargs = None
if len(wmsg) > 4:
kwargs = wmsg[4]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
## optional 'progress' option: marks this as a progressive (non-final) result
progress = None
if u'progress' in options:
option_progress = options[u'progress']
if type(option_progress) != bool:
raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
progress = option_progress
obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.progress is not None:
options[u'progress'] = self.progress
## NOTE(review): if kwargs is non-empty while args is None, a None 'args' slot is
## marshaled into the wire message -- confirm this is intended
if self.kwargs:
return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
elif self.args:
return [Yield.MESSAGE_TYPE, self.request, options, self.args]
else:
return [Yield.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
parse
|
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Pre-compiled patterns for validating WAMP URIs. "strict" restricts each URI
## component to [0-9a-z_]{2,}; "loose" only forbids whitespace, '.' and '#'.
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate *value* as a WAMP URI (loose rules), raising ``ProtocolError`` on failure.

    :param value: The value to validate.
    :param message: Context string prefixed to any error message.
    :returns: The validated value, unchanged.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Validate *value* as a WAMP ID (integer in [0, 2**53]), raising ``ProtocolError`` on failure.

    :param value: The value to validate.
    :param message: Context string prefixed to any error message.
    :returns: The validated value, unchanged.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs must fit into the range [0, 2**53]
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate *value* as a WAMP "extra" dict (all keys unicode), raising ``ProtocolError`` on failure.

    :param value: The value to validate.
    :param message: Context string prefixed to any error message.
    :returns: The validated value, unchanged.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value.keys():
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
"""
WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.
.. note:: This is not supposed to be instantiated.
"""
def __init__(self):
## serialization cache: mapping from ISerializer instances to serialized bytes
self._serialized = {}
def uncache(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`

Drops all cached serializations of this message.
"""
self._serialized = {}
def serialize(self, serializer):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`

Serializes this message with the given serializer, caching the result per serializer.
"""
## only serialize if not cached ..
if not serializer in self._serialized:
self._serialized[serializer] = serializer.serialize(self.marshal())
return self._serialized[serializer]
## register Message as a virtual subclass (ABC) of IMessage
IMessage.register(Message)
class Hello(Message):
"""
A WAMP ``HELLO`` message, sent by a client to open a session on a realm.
Format: ``[HELLO, Realm|uri, Details|dict]``
"""
MESSAGE_TYPE = 1
"""
The WAMP message code for this type of message.
"""
def __init__(self, realm, roles, authmethods = None, authid = None):
"""
:param realm: The URI of the WAMP realm to join.
:type realm: unicode
:param roles: The WAMP roles to announce.
:type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
:param authmethods: The authentication methods to announce.
:type authmethods: list of unicode or None
:param authid: The authentication ID to announce.
:type authid: unicode or None
"""
assert(type(realm) == six.text_type)
assert(type(roles) == list)
for role in roles:
assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
if authmethods:
assert(type(authmethods) == list)
for authmethod in authmethods:
assert(type(authmethod) == six.text_type)
assert(authid is None or type(authid) == six.text_type)
Message.__init__(self)
self.realm = realm
self.roles = roles
self.authmethods = authmethods
self.authid = authid
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
## mandatory 'roles' detail: announced role features keyed by role name
roles = []
if not u'roles' in details:
raise ProtocolError("missing mandatory roles attribute in options in HELLO")
details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
if len(details_roles) == 0:
raise ProtocolError("empty 'roles' in 'details' in HELLO")
for role in details_roles:
if role not in ROLE_NAME_TO_CLASS:
raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
role_cls = ROLE_NAME_TO_CLASS[role]
details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
if u'features' in details_role:
check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
## FIXME: skip unknown attributes
role_features = role_cls(**details_role[u'features'])
else:
role_features = role_cls()
roles.append(role_features)
## optional 'authmethods' detail: list of announced authentication methods
authmethods = None
if u'authmethods' in details:
details_authmethods = details[u'authmethods']
if type(details_authmethods) != list:
raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
for auth_method in details_authmethods:
if type(auth_method) != six.text_type:
raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
authmethods = details_authmethods
## optional 'authid' detail
authid = None
if u'authid' in details:
details_authid = details[u'authid']
if type(details_authid) != six.text_type:
raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
authid = details_authid
obj = Hello(realm, roles, authmethods, authid)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {u'roles': {}}
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
## only announce features explicitly set (non-None), under a 'features' sub-dict
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
if self.authmethods:
details[u'authmethods'] = self.authmethods
if self.authid:
details[u'authid'] = self.authid
return [Hello.MESSAGE_TYPE, self.realm, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message, sent by a router to accept a client's session.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## optional auth details (not type-checked here -- kept as received)
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        ## mandatory 'roles' detail: announced role features keyed by role name
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        ## consistency: use the unicode key as elsewhere in this module
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## fix: was "if self.authrole:" (copy/paste) -- authmethod was only
        ## marshaled when authrole happened to be set
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce features explicitly set (non-None), under a 'features' sub-dict
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message, sent to abort the opening of a session.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
            message = details_message

        obj = Abort(reason, message)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        auth_method = wmsg[1]
        if type(auth_method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(auth_method)))

        ## validate the extra dict, then build the message
        return Challenge(auth_method, check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`

        :returns: The serializable raw message as a list.
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        sig = wmsg[1]
        if type(sig) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(sig)))

        ## validate the extra dict, then build the message
        return Authenticate(sig, check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`

        :returns: The serializable raw message as a list.
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
"""
A WAMP ``GOODBYE`` message, sent to close an established session.
Format: ``[GOODBYE, Details|dict, Reason|uri]``
"""
MESSAGE_TYPE = 6
"""
The WAMP message code for this type of message.
"""
DEFAULT_REASON = u"wamp.goodbye.normal"
"""
Default WAMP closing reason.
"""
def __init__(self, reason = DEFAULT_REASON, message = None):
"""
:param reason: Optional WAMP or application error URI for closing reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
## optional human-readable 'message' detail
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
message = details_message
obj = Goodbye(reason, message)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.message:
details[u'message'] = self.message
return [Goodbye.MESSAGE_TYPE, details, self.reason]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Heartbeat` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## the message type code itself was already checked by the serializer
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0:
            ## must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0:
            ## must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        ## the discard payload only exists in the 4-element format
        discard = None
        if len(wmsg) == 4:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        discard_len = len(self.discard) if self.discard else None
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, discard_len)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            ## fixed: report the offending *type* (the message text says "invalid type"),
            ## not the raw value — consistent with every other type check in this module
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(type(request_type)))

        ## only request/response interactions can produce an ERROR reply
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated but currently unused
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        ## optional positional payload
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        ## optional keyword payload (only present in the 7-element format)
        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        ## emit the shortest wire format that still carries all payload present
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        ## validate argument types eagerly; None means "option not set"
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        ## optional keyword payload (only present in the 6-element format)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## all publish options default to "not set"
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            ## every entry must be a WAMP session ID (an integer)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            ## every entry must be a WAMP session ID (an integer)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options that were explicitly set go on the wire
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## emit the shortest wire format that still carries all payload present
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Published` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## the message type code itself was already checked by the serializer
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = %s, publication = %s)" % (self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching methods
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Subscribe` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## the message type code itself was already checked by the serializer
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## exact matching is the default when no 'match' option is present
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match_opt = options[u'match']
            if type(match_opt) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match_opt)))
            if match_opt not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match_opt))
            match = match_opt

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only transmit the matching method when it differs from the default
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options = {u'match': self.match}
        else:
            options = {}
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = %s, topic = %s, match = %s)" % (self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Subscribed` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## the message type code itself was already checked by the serializer
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = %s, subscription = %s)" % (self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Unsubscribe` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## the message type code itself was already checked by the serializer
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = %s, subscription = %s)" % (self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Unsubscribed` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## the message type code itself was already checked by the serializer
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = %s)" % (self.request,)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        ## validate argument types eagerly; None means "not set"
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        ## optional keyword payload (only present in the 6-element format)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## the publisher session ID is an optional detail
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only disclose the publisher when it was set
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## emit the shortest wire format that still carries all payload present
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## validate argument types eagerly; None means "option not set"
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        ## optional positional call arguments
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        ## optional keyword call arguments (only present in the 6-element format)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## 'timeout' option: auto-cancel interval; 0 is allowed (presumably
        ## meaning "no timeout" — verify against the router implementation)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout

        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress

        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options that were explicitly set go on the wire
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## emit the shortest wire format that still carries all payload present
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## modes of canceling an outstanding call
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None

        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only transmit the cancel mode when it was set
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: the format string had a stray doubled quote after the mode
        ## placeholder ("... mode = '{1}'')"), which rendered e.g. "mode = 'kill'')"
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
                     Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
                       Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
                         results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError on invalid message length, payload types or details.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        # 3 fixed fields plus optional args and optional kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        # optional 'progress' detail marks a progressive (non-final) result
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.progress is not None:
            details[u'progress'] = self.progress

        # NOTE: empty (falsy) args/kwargs are dropped from the wire format;
        # kwargs can only be sent when args is also present.
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
                               the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            # every partition key must be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError on invalid message length, URI or options.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        # optional 'pkeys' option: list of integer partition keys
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))

            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

            pkeys = option_pkeys

        # optional 'disclose_caller' option: boolean flag
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))

            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys

        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert type(request) in six.integer_types
        assert type(registration) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert wmsg and wmsg[0] == Registered.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        # arguments evaluate left-to-right, preserving the validation order
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert type(request) in six.integer_types
        assert type(registration) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert wmsg and wmsg[0] == Unregister.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        # arguments evaluate left-to-right, preserving the validation order
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError on invalid message length or request ID.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: error texts previously said "UNREGISTER" although this is
            ## the UNREGISTERED message (type 67) — now consistent with the class.
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: was "WAMP UNREGISTER Message ..." — inconsistent with the message type
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
                     Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
                       Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
                        the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError on invalid message length, payload types or details.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        # 4 fixed fields plus optional args and optional kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        # optional 'timeout' detail: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))

            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))

            timeout = detail_timeout

        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress

        # optional caller identity disclosure details
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller

        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.caller is not None:
            options[u'caller'] = self.caller

        if self.authid is not None:
            options[u'authid'] = self.authid

        if self.authrole is not None:
            options[u'authrole'] = self.authrole

        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        # NOTE: empty (falsy) args/kwargs are dropped from the wire format;
        # kwargs can only be sent when args is also present.
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert type(request) in six.integer_types
        assert mode is None or type(mode) == six.text_type
        assert mode is None or mode in [self.ABORT, self.KILL]

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert wmsg and wmsg[0] == Interrupt.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## options
        ##
        ## 'mode', when present, must be a unicode string naming one of the
        ## two interruption modes.
        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        if self.mode is None:
            options = {}
        else:
            options = {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
                     Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
                       Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
                         results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError on invalid message length, payload types or options.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        # 3 fixed fields plus optional args and optional kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        # optional 'progress' option marks a progressive (non-final) yield
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress

        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.progress is not None:
            options[u'progress'] = self.progress

        # NOTE: empty (falsy) args/kwargs are dropped from the wire format;
        # kwargs can only be sent when args is also present.
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
## ---------------------------------------------------------------------------
## NOTE(review): this @staticmethod appears detached from its enclosing class
## (presumably ``Abort``, whose MESSAGE_TYPE it references) — verify placement;
## it looks like a duplicated fragment of Abort.parse.
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.

    :param wmsg: The unserialized raw message.
    :type wmsg: list
    :returns: An instance of this class.
    """
    ## this should already be verified by WampSerializer.unserialize
    ##
    assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

    # ABORT is always exactly [ABORT, Details|dict, Reason|uri]
    if len(wmsg) != 3:
        raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

    details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
    reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

    # optional human-readable 'message' detail (must be unicode when present)
    message = None
    if u'message' in details:
        details_message = details[u'message']
        if type(details_message) != six.text_type:
            raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
        message = details_message

    obj = Abort(reason, message)
    return obj
## ---------------------------------------------------------------------------
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## public API of this module: the WAMP message classes
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Pre-compiled URI validation patterns. "strict" restricts components to
## [0-9a-z_], "loose" only excludes whitespace, '.' and '#'.
## NOTE(review): only _URI_PAT_LOOSE is used by check_or_raise_uri in this
## portion of the file — the others are presumably used elsewhere; verify.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check a value for being a valid WAMP URI; raise ProtocolError otherwise.

    :param value: The value to check.
    :param message: Context string prefixed to any error message.
    :returns: The validated value, unchanged.
    """
    if type(value) is not six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check a value for being a valid WAMP ID; raise ProtocolError otherwise.

    :param value: The value to check.
    :param message: Context string prefixed to any error message.
    :returns: The validated value, unchanged.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs live in [0, 2**53] so they survive a JSON double round-trip
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check a value for being a valid WAMP extra (a dict with unicode keys);
    raise ProtocolError otherwise.

    :param value: The value to check.
    :param message: Context string prefixed to any error message.
    :returns: The validated value, unchanged.
    """
    if type(value) is not dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) is not six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`

        Drops all cached serializations of this message.
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`

        :param serializer: The serializer to produce bytes with; results are
                           cached per serializer until :meth:`uncache` is called.
        :returns: The serialized form of this message.
        """
        ## only serialize if not cached ..
        ## (idiom fix: was "if not serializer in ..." — use "not in")
        if serializer not in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]
## register Message as a virtual subclass of the IMessage ABC
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError on invalid message length, realm URI or details.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        # 'roles' is mandatory and maps role names to (possibly empty)
        # per-role feature dicts, converted to RoleFeatures instances.
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()

            roles.append(role_features)

        # optional 'authmethods': list of unicode method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        # optional 'authid': unicode authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            # announce only features explicitly set (non-None) on the role object
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
"""
A WAMP ``WELCOME`` message.
Format: ``[WELCOME, Session|id, Details|dict]``
"""
MESSAGE_TYPE = 2
"""
The WAMP message code for this type of message.
"""
def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
    """
    :param session: The WAMP session ID the other peer is assigned.
    :type session: int
    :param roles: The WAMP roles to announce.
    :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
    :param authid: The authentication ID assigned.
    :type authid: unicode or None
    :param authrole: The authentication role assigned.
    :type authrole: unicode or None
    :param authmethod: The authentication method in use.
    :type authmethod: unicode or None
    :param authprovider: The authentication provider in use.
    :type authprovider: unicode or None
    """
    assert(type(session) in six.integer_types)
    assert(type(roles) == list)
    for role in roles:
        assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
    assert(authid is None or type(authid) == six.text_type)
    assert(authrole is None or type(authrole) == six.text_type)
    assert(authmethod is None or type(authmethod) == six.text_type)
    assert(authprovider is None or type(authprovider) == six.text_type)

    Message.__init__(self)
    self.session = session
    self.roles = roles
    self.authid = authid
    self.authrole = authrole
    self.authmethod = authmethod
    self.authprovider = authprovider
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
authid = details.get(u'authid', None)
authrole = details.get(u'authrole', None)
authmethod = details.get(u'authmethod', None)
authprovider = details.get(u'authprovider', None)
roles = []
if not u'roles' in details:
raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")
if len(details_roles) == 0:
raise ProtocolError("empty 'roles' in 'details' in WELCOME")
for role in details_roles:
if role not in ROLE_NAME_TO_CLASS:
raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
role_cls = ROLE_NAME_TO_CLASS[role]
if u'features' in details_roles[role]:
check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
## FIXME: skip unknown attributes
role_features = role_cls(**details_roles[role][u'features'])
else:
role_features = role_cls()
roles.append(role_features)
obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {
u'roles': {}
}
if self.authid:
details[u'authid'] = self.authid
if self.authrole:
details[u'authrole'] = self.authrole
if self.authrole:
details[u'authmethod'] = self.authmethod
if self.authprovider:
details[u'authprovider'] = self.authprovider
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
return [Welcome.MESSAGE_TYPE, self.session, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message, sent to abort the opening of a session.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        parsed_details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        parsed_reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## the closing message detail is optional, but must be text when present
        parsed_message = None
        if u'message' in parsed_details:
            candidate = parsed_details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(candidate)))
            parsed_message = candidate

        return Abort(parsed_reason, parsed_message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message, sent during authentication.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize: a missing extra dict becomes an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        auth_method = wmsg[1]
        if type(auth_method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(auth_method)))

        auth_extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(auth_method, auth_extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message, the response to a ``CHALLENGE``.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize: a missing extra dict becomes an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        challenge_signature = wmsg[1]
        if type(challenge_signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(challenge_signature)))

        challenge_extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(challenge_signature, challenge_extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message, used to close an established session.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        parsed_details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        parsed_reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## the closing message detail is optional, but must be text when present
        parsed_message = None
        if u'message' in parsed_details:
            candidate = parsed_details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(candidate)))
            parsed_message = candidate

        return Goodbye(parsed_reason, parsed_message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        hb_incoming = wmsg[1]
        if type(hb_incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(hb_incoming)))
        if hb_incoming < 0:  # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(hb_incoming))

        hb_outgoing = wmsg[2]
        if type(hb_outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(hb_outgoing)))
        if hb_outgoing <= 0:  # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(hb_outgoing))

        ## optional trailing discard payload
        hb_discard = None
        if len(wmsg) > 3:
            hb_discard = wmsg[3]
            if type(hb_discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(hb_discard)))

        return Heartbeat(hb_incoming, hb_outgoing, discard = hb_discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## ERROR may only refer to request message types that can fail
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated but currently carry no parsed attributes
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.kwargs:
            ## FIX: when only a keyword payload is present, emit an empty
            ## Arguments list instead of None -- the 7-element wire format
            ## requires Arguments|list (and parse() rejects a non-list).
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args or [], self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## all publish options are optional; validate each when present
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        if self.kwargs:
            ## FIX: when only a keyword payload is present, emit an empty
            ## Arguments list instead of None -- the 6-element wire format
            ## requires Arguments|list (and parse() rejects a non-list).
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args or [], self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a ``PUBLISH``.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        request_id = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
        publication_id = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")

        return Published(request_id, publication_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## supported topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request_id = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        sub_options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        sub_topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact; validate when explicitly given
        sub_match = Subscribe.MATCH_EXACT
        if u'match' in sub_options:
            candidate = sub_options[u'match']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(candidate)))
            if candidate not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(candidate))
            sub_match = candidate

        return Subscribe(request_id, sub_topic, sub_match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the default (exact) policy is left implicit on the wire
        options = {}
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE``.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        request_id = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
        subscription_id = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")

        return Subscribed(request_id, subscription_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        request_id = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription_id = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        return Unsubscribe(request_id, subscription_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        request_id = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")

        return Unsubscribed(request_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the EVENT formats.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        ## optional keyword payload (only present when positional payload is too)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## optional disclosure of the publisher's WAMP session ID
        publisher = None
        if u'publisher' in details:

            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))

            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## shorter wire formats are used when trailing payload members are empty/absent
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the CALL formats.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        ## optional keyword payload (only present when positional payload is too)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## 'timeout' option: non-negative integer milliseconds
        timeout = None
        if u'timeout' in options:

            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))

            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))

            timeout = option_timeout

        ## 'receive_progress' option: caller wants progressive results
        receive_progress = None
        if u'receive_progress' in options:

            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))

            receive_progress = option_receive_progress

        ## 'disclose_me' option: caller requests identity disclosure to callee
        discloseMe = None
        if u'disclose_me' in options:

            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## shorter wire formats are used when trailing payload members are empty/absent
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## permissible values for the 'mode' cancellation option
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the CANCEL format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None
        if u'mode' in options:

            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))

            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))

            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: removed a stray quote that previously rendered as "mode = '...'')"
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the RESULT formats.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        ## optional keyword payload (only present when positional payload is too)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## 'progress' detail: marks a progressive (non-final) result
        progress = None
        if u'progress' in details:

            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))

            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.progress is not None:
            details[u'progress'] = self.progress

        ## shorter wire formats are used when trailing payload members are empty/absent
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            ## every partition key must itself be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the REGISTER format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## 'pkeys' option: list of application partition keys (each an integer)
        if u'pkeys' in options:

            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))

            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

            pkeys = option_pkeys

        ## 'disclose_caller' option: callee requests caller identity disclosure
        if u'disclose_caller' in options:

            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))

            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, confirming a prior ``REGISTER`` request.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)

        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Registered` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the REGISTERED format.
        """
        ## the serializer has already dispatched on the message code
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message, requesting removal of a prior registration.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)

        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Unregister` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the UNREGISTER format.
        """
        ## the serializer has already dispatched on the message code
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the UNREGISTERED format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: diagnostics previously said "UNREGISTER" although this is an
            ## UNREGISTERED message -- corrected for consistency with sibling classes.
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: previously rendered as "WAMP UNREGISTER Message"
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the INVOCATION formats.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        ## optional keyword payload (only present when positional payload is too)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## 'timeout' detail: non-negative integer milliseconds
        timeout = None
        if u'timeout' in details:

            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))

            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))

            timeout = detail_timeout

        ## 'receive_progress' detail: callee should produce progressive results
        receive_progress = None
        if u'receive_progress' in details:

            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))

            receive_progress = detail_receive_progress

        ## 'caller' detail: disclosed WAMP session ID of the caller
        caller = None
        if u'caller' in details:

            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))

            caller = detail_caller

        ## 'authid' detail: disclosed authentication ID of the caller
        authid = None
        if u'authid' in details:

            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))

            authid = detail_authid

        ## 'authrole' detail: disclosed authentication role of the caller
        authrole = None
        if u'authrole' in details:

            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))

            authrole = detail_authrole

        ## 'authmethod' detail: method under which the caller was authenticated
        authmethod = None
        if u'authmethod' in details:

            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))

            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## shorter wire formats are used when trailing payload members are empty/absent
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message, asking a callee to interrupt a running invocation.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## permissible values for the 'mode' option
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or (type(mode) == six.text_type and mode in [self.ABORT, self.KILL]))

        Message.__init__(self)

        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Interrupt` instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the INTERRUPT format.
        """
        ## the serializer has already dispatched on the message code
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## validate the optional 'mode' option (must be one of the known modes)
        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in (Interrupt.ABORT, Interrupt.KILL):
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the YIELD formats.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        ## optional keyword payload (only present when positional payload is too)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        ## 'progress' option: marks a progressive (non-final) invocation result
        progress = None
        if u'progress' in options:

            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))

            progress = option_progress

        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.progress is not None:
            options[u'progress'] = self.progress

        ## shorter wire formats are used when trailing payload members are empty/absent
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
__init__

:param method: The authentication method.
:type method: unicode
:param extra: Authentication method-specific information.
:type extra: dict or None
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## Public API of this module: one class per WAMP message type, plus the
## common ``Message`` base class.
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Precompiled URI validation patterns. "strict" restricts components to
## lowercase alphanumerics/underscore; "loose" only excludes whitespace,
## '.' and '#'. The *_NON_EMPTY variants additionally reject empty components.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate *value* as a WAMP URI (loose pattern) and return it unchanged.

    :param value: The candidate URI.
    :param message: Context prefix used in the error text.
    :returns: *value*, when valid.
    :raises: :class:`autobahn.wamp.exception.ProtocolError` on wrong type or value.
    """
    if type(value) == six.text_type:
        if _URI_PAT_LOOSE.match(value):
            return value
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
def check_or_raise_id(value, message):
    """
    Validate *value* as a WAMP ID and return it unchanged.

    :param value: The candidate ID.
    :param message: Context prefix used in the error text.
    :returns: *value*, when valid.
    :raises: :class:`autobahn.wamp.exception.ProtocolError` on wrong type or value.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs must fit the exactly-representable integer range of an
    ## IEEE-754 double: [0, 2**53]
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate *value* as a dict with unicode keys and return it unchanged.

    :param value: The candidate "extra" / "details" / "options" dict.
    :param message: Context prefix used in the error text.
    :returns: *value*, when valid.
    :raises: :class:`autobahn.wamp.exception.ProtocolError` on wrong type.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for k in value:
        if type(k) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-instance serialization cache: maps ISerializer instances to
        ## the bytes produced for this message by that serializer
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop the cache wholesale; subsequent serialize() calls re-serialize
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serve from cache when possible, serialize (and cache) otherwise
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data
## Register Message as a virtual subclass of the IMessage ABC, so
## isinstance/issubclass checks against the interface succeed.
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## 'roles' is the only mandatory detail; each announced role must map
        ## to a known role class, and its feature dict (if any) is forwarded
        ## to the role class constructor as keyword arguments
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()

            roles.append(role_features)

        ## optional: list of announced authentication methods
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        ## optional: announced authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce explicitly-set public features; a 'features'
                ## sub-dict is created lazily on the first such feature
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## auth details are all optional and passed through untyped
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## consistency fix: use a unicode key like everywhere else in this
        ## module (was the bytes literal 'roles' — equivalent lookup, but
        ## inconsistent with Hello.parse)
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid

        if self.authrole:
            details[u'authrole'] = self.authrole

        ## BUGFIX: this previously tested ``self.authrole`` (copy-paste error),
        ## so 'authmethod' was emitted (possibly as None) whenever authrole was
        ## set, and dropped when only authmethod was set
        if self.authmethod:
            details[u'authmethod'] = self.authmethod

        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce explicitly-set public features
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            candidate = details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(candidate)))
            message = candidate

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP ABORT Message (message = {0}, reason = {1})"
        return template.format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        ## FIX: this constructor was missing entirely, so ``Challenge.parse``
        ## (and any direct construction) could not create instances; modeled
        ## on the parallel ``Authenticate.__init__``
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize a missing extra dict to an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        obj = Challenge(method, extra)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra dict to an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})"
        return template.format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            candidate = details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(candidate)))
            message = candidate

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP GOODBYE Message (message = {0}, reason = {1})"
        return template.format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]

        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))

        ## NOTE the asymmetry: 'incoming' may be 0 while 'outgoing' may not
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]

        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))

        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        ## optional discard payload (4-element form)
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only emit the 4-element form when a discard payload is present
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## an ERROR may only refer to one of the request-bearing message types
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated for well-formedness but otherwise unused here
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        ## optional positional payload (6/7-element forms)
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        ## optional keyword payload (7-element form)
        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no details are currently emitted -- empty dict per wire format
        details = {}

        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        ## optional positional payload (5/6-element forms)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        ## optional keyword payload (6-element form)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## all options default to None ("not specified on the wire")
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            ## each entry must be a WAMP session ID (integer)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            ## each entry must be a WAMP session ID (integer)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only explicitly-set options go on the wire
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed input.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP PUBLISHED Message (request = {0}, publication = {1})"
        return template.format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies a subscription can request;
    ## 'exact' is the default and is omitted on the wire
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'
def __init__(self, request, topic, match = MATCH_EXACT):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param topic: The WAMP or application URI of the PubSub topic to subscribe to.
:type topic: unicode
:param match: The topic matching method to be used for the subscription.
:type match: unicode
"""
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(match is None or type(match) == six.text_type)
assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
Message.__init__(self)
self.request = request
self.topic = topic
self.match = match
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
match = Subscribe.MATCH_EXACT
if u'match' in options:
option_match = options[u'match']
if type(option_match) != six.text_type:
raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
match = option_match
obj = Subscribe(request, topic, match)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.match and self.match != Subscribe.MATCH_EXACT:
options[u'match'] = self.match
return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has the wrong length or invalid fields.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## fixed for consistency: sibling message classes report
            ## "for <MESSAGE-NAME>" without the extra "WAMP " prefix
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        obj = Unsubscribe(request, subscription)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has invalid length or field types.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        ## 4 = bare event, 5 = with args, 6 = with args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## optional publisher disclosure carried inside 'details'
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## the wire format is positional: kwargs can only be present
        ## together with args, so check kwargs first
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has invalid length, field types or option values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        ## 4 = bare call, 5 = with args, 6 = with args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## optional call timeout in ms; must be a non-negative integer
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))

            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))

            timeout = option_timeout

        ## optional flag: caller wants progressive results
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))

            receive_progress = option_receive_progress

        ## optional flag: caller requests disclosure to the callee
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only non-None options are serialized
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## the wire format is positional: kwargs can only be present
        ## together with args, so check kwargs first
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## valid cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has invalid length, field types or mode value.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None

        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))

            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))

            mode = option_mode

        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: the original format string had a stray extra quote
        ## after the mode placeholder ("...'{1}'')")
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has invalid length or field types.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        ## 3 = bare result, 4 = with args, 5 = with args + kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## optional flag carried in 'details': this is a progressive result
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))

            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## the wire format is positional: kwargs can only be present
        ## together with args, so check kwargs first
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            ## every partition key must itself be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has invalid length, field types or option values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## optional list of application partition keys served by the endpoint
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))

            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

            pkeys = option_pkeys

        ## optional flag: callee requests caller identity disclosure
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))

            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only non-None options are serialized
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys

        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has the wrong length or invalid fields.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## fixed for consistency: sibling message classes report
            ## "for <MESSAGE-NAME>" without the extra "WAMP " prefix
            raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        obj = Unregister(request, registration)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the message has the wrong length or an invalid request ID.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## fixed: this is the UNREGISTERED message, but the original
            ## error text mis-reported it as "UNREGISTER"
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: the original string said "UNREGISTER" for an UNREGISTERED message
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
"""
A WAMP ``INVOCATION`` message.
Formats:
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 68
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
registration,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
caller = None,
authid = None,
authrole = None,
authmethod = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID of the endpoint to be invoked.
:type registration: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param timeout: If present, let the callee automatically cancels
the invocation after this ms.
:type timeout: int or None
:param receive_progress: Indicates if the callee should produce progressive results.
:type receive_progress: bool or None
:param caller: The WAMP session ID of the caller.
:type caller: int or None
:param authid: The authentication ID of the caller.
:type authid: unicode or None
:param authrole: The authentication role of the caller.
:type authrole: unicode or None
:param authmethod: The authentication method under which the caller was authenticated.
:type authmethod: unicode or None
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(caller is None or type(caller) in six.integer_types)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
Message.__init__(self)
self.request = request
self.registration = registration
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.
    :param wmsg: The unserialized raw message.
    :type wmsg: list
    :returns: An instance of this class.
    """
    ## this should already be verified by WampSerializer.unserialize
    ##
    assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
    ## valid wire lengths: 4 = no payload, 5 = positional args, 6 = args + kwargs
    if len(wmsg) not in (4, 5, 6):
        raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
    request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
    registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
    details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
    ## trailing payload elements are optional on the wire
    args = None
    if len(wmsg) > 4:
        args = wmsg[4]
        if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
    kwargs = None
    if len(wmsg) > 5:
        kwargs = wmsg[5]
        if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
    ## 'timeout' detail: optional non-negative integer (the only detail with a value check)
    timeout = None
    if u'timeout' in details:
        detail_timeout = details[u'timeout']
        if type(detail_timeout) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
        if detail_timeout < 0:
            raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
        timeout = detail_timeout
    ## 'receive_progress' detail: optional bool
    receive_progress = None
    if u'receive_progress' in details:
        detail_receive_progress = details[u'receive_progress']
        if type(detail_receive_progress) != bool:
            raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
        receive_progress = detail_receive_progress
    ## 'caller' detail: optional integer (WAMP session ID)
    caller = None
    if u'caller' in details:
        detail_caller = details[u'caller']
        if type(detail_caller) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
        caller = detail_caller
    ## 'authid' detail: optional unicode string
    authid = None
    if u'authid' in details:
        detail_authid = details[u'authid']
        if type(detail_authid) != six.text_type:
            raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
        authid = detail_authid
    ## 'authrole' detail: optional unicode string
    authrole = None
    if u'authrole' in details:
        detail_authrole = details[u'authrole']
        if type(detail_authrole) != six.text_type:
            raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
        authrole = detail_authrole
    ## 'authmethod' detail: optional unicode string
    authmethod = None
    if u'authmethod' in details:
        detail_authmethod = details[u'authmethod']
        if type(detail_authmethod) != six.text_type:
            raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
        authmethod = detail_authmethod
    obj = Invocation(request,
                     registration,
                     args = args,
                     kwargs = kwargs,
                     timeout = timeout,
                     receive_progress = receive_progress,
                     caller = caller,
                     authid = authid,
                     authrole = authrole,
                     authmethod = authmethod)
    return obj
def marshal(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
    """
    ## collect only the details that were actually set (order matters for
    ## deterministic serialization, so keep the original insertion order)
    options = {}
    for key, value in ((u'timeout', self.timeout),
                       (u'receive_progress', self.receive_progress),
                       (u'caller', self.caller),
                       (u'authid', self.authid),
                       (u'authrole', self.authrole),
                       (u'authmethod', self.authmethod)):
        if value is not None:
            options[key] = value
    ## payload elements are trailing-optional on the wire; kwargs implies args
    msg = [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
    if self.kwargs:
        msg.append(self.args)
        msg.append(self.kwargs)
    elif self.args:
        msg.append(self.args)
    return msg
def __str__(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
    """
    ## human-readable rendering for logging/debugging
    template = "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})"
    return template.format(self.request, self.registration, self.args,
                           self.kwargs, self.timeout, self.receive_progress,
                           self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.
    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two interruption modes defined by WAMP
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## framing is assumed to be pre-checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
        ## 'mode' option: optional, must be one of the two known modes
        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate
        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {} if self.mode is None else {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.
    Formats:
    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## framing is assumed to be pre-checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
        ## valid wire lengths: 3 = no payload, 4 = positional args, 5 = args + kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
        payload_args = None
        if len(wmsg) > 3:
            payload_args = wmsg[3]
            if type(payload_args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(payload_args)))
        payload_kwargs = None
        if len(wmsg) > 4:
            payload_kwargs = wmsg[4]
            if type(payload_kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(payload_kwargs)))
        ## 'progress' option: optional bool
        progress = None
        if u'progress' in options:
            candidate = options[u'progress']
            if type(candidate) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(candidate)))
            progress = candidate
        return Yield(request, args = payload_args, kwargs = payload_kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress
        ## payload is trailing-optional on the wire; kwargs implies args
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.append(self.args)
            msg.append(self.kwargs)
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
def __init__(self, method, extra = None):
"""
:param method: The authentication method.
:type method: unicode
:param extra: Authentication method specific information.
:type extra: dict or None
"""
assert(type(method) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.method = method
self.extra = extra or {}
| 520
| 533
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Pre-compiled URI validation patterns. "strict" restricts each dotted
## component to [0-9a-z_] of length >= 2; "loose" only excludes whitespace,
## '.' and '#'. The *_NON_EMPTY variants do not permit empty components.
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check that `value` is a valid (loose) WAMP URI and return it;
    raise :class:`autobahn.wamp.exception.ProtocolError` (prefixed
    with `message`) otherwise.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    ## the loose pattern is used for wire-level validation here
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check that `value` is a valid WAMP ID (integer in [0, 2**53]) and
    return it; raise :class:`autobahn.wamp.exception.ProtocolError`
    (prefixed with `message`) otherwise.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs are capped at 2**53 so they survive IEEE-754 double
    ## representation (e.g. in JavaScript peers)
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check that `value` is a dict with text-type keys (a WAMP
    "details"/"options" dict) and return it; raise
    :class:`autobahn.wamp.exception.ProtocolError` (prefixed with
    `message`) otherwise.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    ## every key must be a text string; values are not constrained here
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.
    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-serializer cache of this message's serialized wire bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and memoize the result per serializer
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data
## register Message as a virtual subclass of the IMessage ABC
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.
    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        ## 'roles' is the only mandatory detail; each announced role is
        ## reconstructed as its RoleFeatures class from ROLE_NAME_TO_CLASS
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                ## NOTE(review): unknown feature keys currently raise TypeError
                ## from the role class constructor rather than ProtocolError
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        ## 'authmethods' detail: optional list of text strings
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        ## 'authid' detail: optional text string
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## only announce features that were explicitly set (non-None),
            ## skipping private attributes and the ROLE marker itself
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.
    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
        ## NOTE(review): unlike HELLO.parse, the auth details are accepted
        ## untyped here (no text-type validation) — confirm this asymmetry
        ## is intended before tightening it
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## fixed: was gated on self.authrole, which dropped 'authmethod'
        ## whenever authrole was unset and emitted authmethod = None when
        ## authrole was set but authmethod was not
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        ## only announce features that were explicitly set (non-None)
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.
    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## framing is assumed to be pre-checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
        ## 'message' detail: optional text string
        message = None
        if u'message' in details:
            candidate = details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(candidate)))
            message = candidate
        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.
    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## framing is assumed to be pre-checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.
    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## framing is assumed to be pre-checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.
    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## framing is assumed to be pre-checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
        ## 'message' detail: optional text string
        message = None
        if u'message' in details:
            candidate = details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(candidate)))
            message = candidate
        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.
    Formats:
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        ## NOTE(review): validation is asymmetric — 'outgoing' must be strictly
        ## positive while 'incoming' may be 0 (presumably no heartbeat received
        ## yet while outgoing numbering starts at 1) — confirm intended.
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
        ## 'discard' element: trailing-optional text string
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
        obj = Heartbeat(incoming, outgoing, discard = discard)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## 'discard' is only put on the wire when non-empty
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a valid ERROR message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            ## FIX: report the offending *type* (the message text says "invalid type"),
            ## consistent with every other type-check error raised in this module.
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(type(request_type)))

        ## an ERROR may only refer to a request-type message that can fail
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated but currently carry no recognized keys
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no ERROR details are produced yet - always serialize an empty dict
        details = {}

        ## WAMP wire layout requires 'args' to be present whenever 'kwargs' is
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        ## mandatory positional fields
        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        ## optional payload (positional arguments)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        ## optional payload (keyword arguments) - only possible when 'args' present
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## recognized publication options - anything else in 'options' is ignored
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            ## every entry must be a session ID (integer)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            ## every entry must be a session ID (integer)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options actually set (not None) are serialized
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## WAMP wire layout requires 'args' to be present whenever 'kwargs' is
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(
            self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact when not given in options
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            option_match = options[u'match']
            if type(option_match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
            if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
            match = option_match

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## 'exact' is the default matching policy and hence not serialized
        options = {}
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(
            self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(
            self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a valid UNSUBSCRIBE message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## FIX: dropped the stray "WAMP " prefix so the message is consistent
            ## with all other length-check errors in this module ("... for SUBSCRIBE", etc.)
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        obj = Unsubscribe(request, subscription)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription_id = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication_id = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        ## optional event payload (positional arguments)
        payload_args = None
        if len(wmsg) > 4:
            payload_args = wmsg[4]
            if type(payload_args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(payload_args)))

        ## optional event payload (keyword arguments)
        payload_kwargs = None
        if len(wmsg) > 5:
            payload_kwargs = wmsg[5]
            if type(payload_kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(payload_kwargs)))

        ## the only detail currently recognized is the disclosed publisher session ID
        publisher_id = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher_id = detail_publisher

        return Event(subscription_id,
                     publication_id,
                     args = payload_args,
                     kwargs = payload_kwargs,
                     publisher = publisher_id)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## 'args' must be on the wire whenever 'kwargs' is
        msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(
            self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        ## mandatory positional fields
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        ## optional call payload (positional arguments)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        ## optional call payload (keyword arguments) - only possible when 'args' present
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## recognized call options - anything else in 'options' is ignored
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            ## timeout is in milliseconds and must be non-negative (0 = no timeout)
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout

        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress

        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options actually set (not None) are serialized
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## WAMP wire layout requires 'args' to be present whenever 'kwargs' is
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a valid CANCEL message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None

        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: removed a stray apostrophe after '{1}' which produced a doubled
        ## closing quote in the rendered string
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message type code was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        call_request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        ## optional result payload (positional arguments)
        result_args = None
        if len(wmsg) > 3:
            result_args = wmsg[3]
            if type(result_args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(result_args)))

        ## optional result payload (keyword arguments)
        result_kwargs = None
        if len(wmsg) > 4:
            result_kwargs = wmsg[4]
            if type(result_kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(result_kwargs)))

        ## the only detail currently recognized is the progressive-result flag
        progress_flag = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress_flag = detail_progress

        return Result(call_request, args = result_args, kwargs = result_kwargs, progress = progress_flag)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## 'args' must be on the wire whenever 'kwargs' is
        msg = [Result.MESSAGE_TYPE, self.request, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(
            self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message, sent by a callee to register an RPC endpoint.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## optional 'pkeys' option: must be a list of integers
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys

        ## optional 'disclose_caller' option: must be a bool
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include options that were actually set, keeping the wire message minimal
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys

        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, the router's acknowledgement of a ``REGISTER``.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        ## both remaining fields are plain WAMP IDs
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Registered.MESSAGE_TYPE]
        msg.append(self.request)
        msg.append(self.registration)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP REGISTERED Message (request = {0}, registration = {1})"
        return template.format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message, sent by a callee to remove a registration.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        ## both remaining fields are plain WAMP IDs
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Unregister.MESSAGE_TYPE]
        msg.append(self.request)
        msg.append(self.registration)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP UNREGISTER Message (request = {0}, registration = {1})"
        return template.format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message, the router's acknowledgement of an ``UNREGISTER``.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: error text previously mislabeled this message as "UNREGISTER"
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: label previously said "UNREGISTER" for this UNREGISTERED message
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message, sent by the router to a callee to actually invoke
    a registered endpoint on behalf of a caller.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        ## optional positional call arguments
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        ## optional keyword call arguments
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## optional 'timeout' detail: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout

        ## optional 'receive_progress' detail: bool
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress

        ## optional caller disclosure details
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller

        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include details that were actually set, keeping the wire message minimal
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.caller is not None:
            options[u'caller'] = self.caller

        if self.authid is not None:
            options[u'authid'] = self.authid

        if self.authrole is not None:
            options[u'authrole'] = self.authrole

        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## args/kwargs are trailing optional fields: kwargs requires args to be present too
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message, sent by the router to a callee to stop a
    running invocation.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two allowed interruption modes
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional 'mode' option: must be one of "abort" / "kill"
        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {} if self.mode is None else {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP INTERRUPT Message (request = {0}, mode = '{1}')"
        return template.format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message, sent by a callee to return the (possibly progressive)
    result of an invocation.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        ## optional 'progress' option: bool
        progress = None
        if u'progress' in options:
            candidate = options[u'progress']
            if type(candidate) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(candidate)))
            progress = candidate

        return Yield(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## start from the mandatory prefix and append the optional payload parts
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})"
        return template.format(self.request, self.args, self.kwargs, self.progress)
|
parse
|
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## explicit public API of this module: the message base class plus one class
## per WAMP message type defined below
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
## (components restricted to lowercase alphanumerics/underscore, length >= 2)
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
## (components may contain anything except whitespace, '.' and '#')
## NOTE: this is the pattern used by check_or_raise_uri() below
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check that ``value`` is a (loosely) valid WAMP URI and return it unchanged;
    otherwise raise :class:`autobahn.wamp.exception.ProtocolError` with ``message``
    as context prefix.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check that ``value`` is a valid WAMP ID and return it unchanged;
    otherwise raise :class:`autobahn.wamp.exception.ProtocolError` with ``message``
    as context prefix.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs are restricted to [0, 2**53] (exact IEEE-754 double integer range)
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check that ``value`` is a dict with text-type keys only (a valid WAMP
    details/options/extra container) and return it unchanged; otherwise raise
    :class:`autobahn.wamp.exception.ProtocolError` with ``message`` as context prefix.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value.keys():
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serve from cache when possible; only marshal + serialize on a miss
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data
## register the base class as a virtual subclass of the IMessage ABC
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message, sent by a client to open a session on a realm.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## mandatory 'roles' detail: map role name -> per-role details,
        ## converted into RoleFeatures instances via the ROLE_NAME_TO_CLASS lookup
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        ## optional 'authmethods' detail: list of text-type method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        ## optional 'authid' detail: text type
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce every non-private, non-None feature flag of the role object
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message, sent by a router to acknowledge session establishment.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: :class:`autobahn.wamp.exception.ProtocolError` on malformed messages.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## optional authentication details (no type checks here, matching prior behavior)
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        ## mandatory 'roles' detail: map role name -> per-role details,
        ## converted into RoleFeatures instances via the ROLE_NAME_TO_CLASS lookup
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }

        if self.authid:
            details[u'authid'] = self.authid

        if self.authrole:
            details[u'authrole'] = self.authrole

        if self.authmethod:
            ## FIX: was gated on self.authrole, which dropped 'authmethod' when no
            ## authrole was set and emitted 'authmethod': None when authrole was
            ## set without an authmethod
            details[u'authmethod'] = self.authmethod

        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce every non-private, non-None feature flag of the role object
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message, sent by a peer to abort session establishment.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable 'message' detail: text type
        message = None
        if u'message' in details:
            candidate = details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(candidate)))
            message = candidate

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP ABORT Message (message = {0}, reason = {1})"
        return template.format(self.message, self.reason)
class Challenge(Message):
"""
A WAMP ``CHALLENGE`` message.
Format: ``[CHALLENGE, Method|string, Extra|dict]``
"""
MESSAGE_TYPE = 4
"""
The WAMP message code for this type of message.
"""
def __init__(self, method, extra = None):
    """
    :param method: The authentication method.
    :type method: unicode
    :param extra: Authentication method specific information.
    :type extra: dict or None
    """
    assert(type(method) == six.text_type)
    assert(extra is None or type(extra) == dict)

    Message.__init__(self)
    self.method = method
    ## normalize a missing/empty extra to a fresh dict
    self.extra = extra if extra else {}
# MASKED: parse function (lines 536-561)
def marshal(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
    """
    ## wire format: [CHALLENGE, Method|string, Extra|dict]
    return [Challenge.MESSAGE_TYPE, self.method, self.extra]
def __str__(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
    """
    attrs = (self.method, self.extra)
    return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(*attrs)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize a missing/empty extra to a fresh dict
        self.extra = extra if extra else {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## the optional human-readable message rides in the details dict
        message = None
        if u'message' in details:
            msg = details[u'message']
            if type(msg) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(msg)))
            message = msg

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0:  # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0:  # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        ## optional discard payload (4-element variant only)
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        discard_len = len(self.discard) if self.discard else None
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, discard_len)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## an ERROR may only refer to one of the request-bearing message types
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        return Error(request_type, request, error, args = args, kwargs = kwargs)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        ## args must be present whenever kwargs are, so emit the longest
        ## applicable wire variant
        msg = [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## validate and extract the individual publish options
        ##
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            ack = options[u'acknowledge']
            if type(ack) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(ack)))
            acknowledge = ack

        if u'exclude_me' in options:
            excl_me = options[u'exclude_me']
            if type(excl_me) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(excl_me)))
            excludeMe = excl_me

        if u'exclude' in options:
            excl = options[u'exclude']
            if type(excl) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(excl)))
            for sessionId in excl:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = excl

        if u'eligible' in options:
            elig = options[u'eligible']
            if type(elig) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(elig)))
            for sessionId in elig:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = elig

        if u'disclose_me' in options:
            disclose = options[u'disclose_me']
            if type(disclose) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(disclose)))
            discloseMe = disclose

        return Publish(request,
                       topic,
                       args = args,
                       kwargs = kwargs,
                       acknowledge = acknowledge,
                       excludeMe = excludeMe,
                       exclude = exclude,
                       eligible = eligible,
                       discloseMe = discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        msg = [Publish.MESSAGE_TYPE, self.request, options, self.topic]
        ## args must be present on the wire whenever kwargs are
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
        publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")

        return Published(request, publication)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## supported topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact unless overridden in options
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match_opt = options[u'match']
            if type(match_opt) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match_opt)))
            if match_opt not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match_opt))
            match = match_opt

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        ## only a non-default matching policy is put on the wire
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")

        return Subscribed(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        return Unsubscribe(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")

        return Unsubscribed(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## the (disclosed) publisher session ID rides in the details dict
        publisher = None
        if u'publisher' in details:
            pub = details[u'publisher']
            if type(pub) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(pub)))
            publisher = pub

        return Event(subscription,
                     publication,
                     args = args,
                     kwargs = kwargs,
                     publisher = publisher)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
        ## args must be present on the wire whenever kwargs are
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.
    Formats:
    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## eager type validation (note: asserts are stripped under "python -O")
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
        ## optional positional payload (5th element)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
        ## optional keyword payload (6th element)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
        ## optional 'timeout' option: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout
        ## optional 'receive_progress' option: bool
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress
        ## optional 'disclose_me' option: bool
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        ## note: empty args ([]) / kwargs ({}) are falsy and are therefore
        ## omitted from the serialized message, same as None
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.
    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## supported cancellation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        ## eager type validation (note: asserts are stripped under "python -O")
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## optional 'mode' option: one of SKIP / ABORT / KILL
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode
        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: dropped the stray extra quote that used to follow the mode
        ## placeholder ("... mode = '{1}'')"), matching Interrupt.__str__
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.
    Formats:
    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        ## eager type validation (note: asserts are stripped under "python -O")
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
        ## optional positional payload (4th element)
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
        ## optional keyword payload (5th element)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
        ## optional 'progress' detail: bool
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress
        obj = Result(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress
        ## note: empty args ([]) / kwargs ({}) are falsy and are therefore
        ## omitted from the serialized message, same as None
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.
    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        ## eager type validation (note: asserts are stripped under "python -O")
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            ## every partition key must itself be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        pkeys = None
        discloseCaller = None
        ## optional 'pkeys' option: list of integer partition keys
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys
        ## optional 'disclose_caller' option: bool
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller
        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, acknowledging a successful ``REGISTER``.
    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into a :class:`Registered`
        instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        req = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
        reg = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")

        return Registered(req, reg)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message, revoking a previously established registration.
    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Unregister`
        instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        req = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        reg = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        return Unregister(req, reg)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.
    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        ## eager type validation (note: asserts are stripped under "python -O")
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
        if len(wmsg) != 2:
            ## fixed: error text previously said "UNREGISTER" although this
            ## parses the UNREGISTERED message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously printed "WAMP UNREGISTER Message", which was
        ## indistinguishable from Unregister.__str__ output
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.
    Formats:
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        ## eager type validation (note: asserts are stripped under "python -O")
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        ## optional positional payload (5th element)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        ## optional keyword payload (6th element)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        ## optional 'timeout' detail: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        ## optional 'receive_progress' detail: bool
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        ## optional 'caller' detail: WAMP session ID of the caller
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        ## optional caller authentication details: 'authid' / 'authrole' / 'authmethod'
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod
        ## note: empty args ([]) / kwargs ({}) are falsy and are therefore
        ## omitted from the serialized message, same as None
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message, used to stop a running invocation.
    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## supported interruption modes
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verify and parse an unserialized raw message into an :class:`Interrupt`
        instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        req = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        opts = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional 'mode' option: one of ABORT / KILL
        mode = None
        if u'mode' in opts:
            candidate = opts[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate

        return Interrupt(req, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        opts = {}
        if self.mode is not None:
            opts[u'mode'] = self.mode
        return [Interrupt.MESSAGE_TYPE, self.request, opts]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.
    Formats:
    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        ## eager type validation (note: asserts are stripped under "python -O")
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
        ## optional positional payload (4th element)
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        ## optional keyword payload (5th element)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
        ## optional 'progress' option: bool
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress
        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress
        ## note: empty args ([]) / kwargs ({}) are falsy and are therefore
        ## omitted from the serialized message, same as None
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
method = wmsg[1]
if type(method) != six.text_type:
raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
obj = Challenge(method, extra)
return obj
| 536
| 561
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## explicit public API of this module: the message base class plus one
## class per WAMP message type defined below
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
## (each non-empty component must match [0-9a-z_]{2,})
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
## (a component may be anything except whitespace, '.' and '#')
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check that ``value`` is a (loosely) valid WAMP URI and return it unchanged.
    :param value: The candidate URI.
    :param message: Context prefix used in error messages.
    :raises ProtocolError: if ``value`` is not a unicode string or does not
       match the loose URI pattern.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check that ``value`` is a valid WAMP ID and return it unchanged.
    :param value: The candidate ID.
    :param message: Context prefix used in error messages.
    :raises ProtocolError: if ``value`` is not an integer in [0, 2**53].
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    if not (0 <= value <= 9007199254740992):  # upper bound is 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check that ``value`` is a dict with unicode keys (a WAMP "extra"
    dictionary such as Options/Details) and return it unchanged.
    :param value: The candidate dictionary.
    :param message: Context prefix used in error messages.
    :raises ProtocolError: if ``value`` is not a dict or has a non-unicode key.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.
    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        Empties the per-serializer cache of serialized representations.
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        :param serializer: The serializer to use (also serves as the cache key).
        :returns: The serialized bytes, computed once per serializer and cached.
        """
        ## only serialize if not cached ..
        ## (idiom fix: "serializer not in" instead of "not serializer in")
        if serializer not in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]
## register Message as a virtual subclass of the IMessage ABC, so
## isinstance(msg, IMessage) holds for all message classes defined here
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``

    Sent to open a session on the given realm, announcing the sender's
    roles and (optionally) the authentication methods/ID it offers.
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## 'roles' is mandatory and non-empty: maps role name -> role detail dict
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            ## each role may carry an optional 'features' dict which becomes
            ## the keyword arguments of the corresponding RoleFeatures class
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only non-None public, non-ROLE attributes are advertised;
                ## the 'features' sub-dict is created lazily on first feature
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``

    Sent by a router to accept a session, announcing the session ID,
    the router's roles and (optionally) authentication information.
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## NOTE(review): unlike HELLO parsing, the auth details are taken as-is
        ## without type checks — confirm upstream intent before tightening
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## use the unicode key consistently (all sibling lookups use u'roles')
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            ## each role may carry an optional 'features' dict which becomes
            ## the keyword arguments of the corresponding RoleFeatures class
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## fixed: previously guarded by `self.authrole`, which dropped a set
        ## authmethod whenever authrole was unset (and emitted authmethod=None
        ## whenever authrole was set without authmethod)
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only non-None public, non-ROLE attributes are advertised;
                ## the 'features' sub-dict is created lazily on first feature
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``

    Sent to abort session establishment, carrying an error URI and an
    optional human-readable message.
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## correct message type is guaranteed by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            msg = details[u'message']
            if type(msg) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(msg)))
            message = msg

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``

    Sent by a router to demand authentication from the client using the
    given method.
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## correct message type is guaranteed by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``

    Sent by a client in response to a ``CHALLENGE``, carrying the
    computed signature.
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## correct message type is guaranteed by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``

    Sent to close an established session, carrying a reason URI and an
    optional human-readable message.
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## correct message type is guaranteed by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            msg = details[u'message']
            if type(msg) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(msg)))
            message = msg

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        ## 3 elements without discard data, 4 with
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]

        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))

        ## note the asymmetry: 'incoming' may be zero (nothing processed yet) ..
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]

        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))

        ## .. while 'outgoing' starts at 1
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## discard element is only emitted when present
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
                     Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
                       Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## only these request types can produce an ERROR reply
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## 'details' is validated but its contents are currently discarded
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no details are currently marshalled — always an empty dict
        details = {}

        ## kwargs requires args to be present in the wire format, so an
        ## (empty) args list is emitted whenever kwargs is non-empty
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
                      be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
                     Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
                       Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
                            error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
                          if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
                           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        ## 4 elements without payload, 5 with args, 6 with args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## all options are optional and default to None (absent)
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))

            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))

            ## every entry must be a (session ID) integer
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))

            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))

            ## every entry must be a (session ID) integer
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))

            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only explicitly set options are emitted (None means absent)
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## kwargs requires args to be present in the wire format
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``

    Acknowledges a ``PUBLISH`` request, carrying the publication ID
    assigned to the event.
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## correct message type is guaranteed by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``

    Requests a subscription to a topic, optionally with prefix or
    wildcard matching.
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## supported topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## correct message type is guaranteed by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact when no option is given
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match_opt = options[u'match']
            if type(match_opt) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match_opt)))
            if match_opt not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match_opt))
            match = match_opt

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the default (exact) policy is left implicit on the wire
        options = {}
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``

    Acknowledges a ``SUBSCRIBE`` request, carrying the subscription ID
    assigned.
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## correct message type is guaranteed by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message, sent by a subscriber to drop a subscription.
    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """
    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
        req = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        sub = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")
        return Unsubscribe(req, sub)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})"
        return template.format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, sent by a broker to acknowledge an unsubscribe.
    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """
    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
        req = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")
        return Unsubscribed(req)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message, dispatched by a broker to subscribers.
    Formats:
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, subscription, publication, args=None, kwargs=None, publisher=None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values of the event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values of the event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
        publisher = None
        if u'publisher' in details:
            publisher = details[u'publisher']
            if type(publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(publisher)))
        return Event(subscription,
                     publication,
                     args=args,
                     kwargs=kwargs,
                     publisher=publisher)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher
        ## trailing payload elements: a kwargs payload implies args is present too
        msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})"
        return template.format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message, sent by a caller to invoke a remote procedure.
    Formats:
    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """
    def __init__(self,
                 request,
                 procedure,
                 args=None,
                 kwargs=None,
                 timeout=None,
                 receive_progress=None,
                 discloseMe=None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
        timeout = None
        if u'timeout' in options:
            timeout = options[u'timeout']
            if type(timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(timeout)))
            if timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(timeout))
        receive_progress = None
        if u'receive_progress' in options:
            receive_progress = options[u'receive_progress']
            if type(receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(receive_progress)))
        discloseMe = None
        if u'disclose_me' in options:
            discloseMe = options[u'disclose_me']
            if type(discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(discloseMe)))
        return Call(request,
                    procedure,
                    args=args,
                    kwargs=kwargs,
                    timeout=timeout,
                    receive_progress=receive_progress,
                    discloseMe=discloseMe)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        ## trailing payload elements: a kwargs payload implies args is present too
        msg = [Call.MESSAGE_TYPE, self.request, options, self.procedure]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})"
        return template.format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message, sent by a caller to cancel a pending call.
    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """
    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """
    ## valid cancellation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'
    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode
        obj = Cancel(request, mode = mode)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: the template previously contained a stray extra quote after the
        ## mode placeholder ("... mode = '{1}'')")
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message, sent by a dealer to return a call result to the caller.
    Formats:
    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, args=None, kwargs=None, progress=None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values of the call result payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values of the call result payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
        progress = None
        if u'progress' in details:
            progress = details[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(progress)))
        return Result(request, args=args, kwargs=kwargs, progress=progress)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress
        ## trailing payload elements: a kwargs payload implies args is present too
        msg = [Result.MESSAGE_TYPE, self.request, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})"
        return template.format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message, sent by a callee to register an RPC endpoint.
    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """
    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, procedure, pkeys=None, discloseCaller=None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        for key in (pkeys or []):
            assert(type(key) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        pkeys = None
        if u'pkeys' in options:
            pkeys = options[u'pkeys']
            if type(pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(pkeys)))
            for pk in pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
        discloseCaller = None
        if u'disclose_caller' in options:
            discloseCaller = options[u'disclose_caller']
            if type(discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(discloseCaller)))
        return Register(request, procedure, pkeys=pkeys, discloseCaller=discloseCaller)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})"
        return template.format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, sent by a dealer to acknowledge a registration.
    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """
    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
        req = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
        reg = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")
        return Registered(req, reg)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP REGISTERED Message (request = {0}, registration = {1})"
        return template.format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message, sent by a callee to drop a registration.
    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """
    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
        req = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        reg = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")
        return Unregister(req, reg)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP UNREGISTER Message (request = {0}, registration = {1})"
        return template.format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message, sent by a dealer to acknowledge an unregister.
    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """
    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
        if len(wmsg) != 2:
            ## fixed: diagnostics previously said "UNREGISTER" (copy-paste from
            ## the Unregister class) though this parses an UNREGISTERED message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
        obj = Unregistered(request)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously labelled "WAMP UNREGISTER Message", which is the
        ## string used by the Unregister class
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message, sent by a dealer to the callee providing the endpoint.
    Formats:
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """
    def __init__(self,
                 request,
                 registration,
                 args=None,
                 kwargs=None,
                 timeout=None,
                 receive_progress=None,
                 caller=None,
                 authid=None,
                 authrole=None,
                 authmethod=None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values of the call payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values of the call payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        timeout = None
        if u'timeout' in details:
            timeout = details[u'timeout']
            if type(timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(timeout)))
            if timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(timeout))
        receive_progress = None
        if u'receive_progress' in details:
            receive_progress = details[u'receive_progress']
            if type(receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(receive_progress)))
        caller = None
        if u'caller' in details:
            caller = details[u'caller']
            if type(caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(caller)))
        ## the three auth details share the same validation: present => text type
        auth = {}
        for key in [u'authid', u'authrole', u'authmethod']:
            auth[key] = None
            if key in details:
                value = details[key]
                if type(value) != six.text_type:
                    raise ProtocolError("invalid type {0} for '{1}' detail in INVOCATION".format(type(value), key))
                auth[key] = value
        return Invocation(request,
                          registration,
                          args=args,
                          kwargs=kwargs,
                          timeout=timeout,
                          receive_progress=receive_progress,
                          caller=caller,
                          authid=auth[u'authid'],
                          authrole=auth[u'authrole'],
                          authmethod=auth[u'authmethod'])
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod
        ## trailing payload elements: a kwargs payload implies args is present too
        msg = [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})"
        return template.format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two interruption modes defined by the protocol
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## the optional 'mode' option must be one of the two known modes
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))
            if option_mode not in (Interrupt.ABORT, Interrupt.KILL):
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))
            mode = option_mode

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
                     Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
                       Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
                         results (or a final error) will follow.
        :type progress: bool or None
        """
        ## programming-error checks (protocol errors are raised in parse() instead)
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        ## args and kwargs trailers are optional, hence three valid lengths
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress

        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.progress is not None:
            options[u'progress'] = self.progress

        ## empty payload trailers are omitted from the wire format
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
__init__
|
:param signature: The signature for the authentication challenge.
:type signature: unicode
:param extra: Authentication method specific information.
:type extra: dict or None
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## Public API of this module: the Message base class plus one class per
## WAMP message type (handshake, pub/sub and RPC messages).
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## URI validation patterns. "strict" components are lowercase alphanumerics
## and underscore with at least 2 characters; "loose" components are any run
## of characters except whitespace, '.' and '#'.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check that `value` is a unicode WAMP URI (loose rules, empty components
    allowed) and return it, raising ProtocolError otherwise.
    """
    value_type = type(value)
    if value_type != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, value_type))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check that `value` is a valid WAMP ID (an integer in [0, 2**53]) and
    return it, raising ProtocolError otherwise.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs must fit in an IEEE-754 double, hence the 2**53 ceiling
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check that `value` is a dict with unicode-only keys and return it,
    raising ProtocolError otherwise.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached wire representations
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily, memoizing the result per serializer instance
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data
## Register Message as a virtual subclass of the IMessage ABC, so
## isinstance(msg, IMessage) holds without inheriting from it.
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        ## programming-error checks (protocol errors are raised in parse() instead)
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## a HELLO must announce at least one role; each role name maps to a
        ## RoleFeatures subclass that is instantiated from its 'features' dict
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes (an unknown feature key
                ## currently raises TypeError from the constructor)
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce features that were explicitly set (non-None),
                ## skipping private attributes and the ROLE marker
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        ## programming-error checks (protocol errors are raised in parse() instead)
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## the auth details are all optional
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        ## a WELCOME must announce at least one router role
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## NOTE: was details['roles']; normalized to the u'' key used everywhere else
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid

        if self.authrole:
            details[u'authrole'] = self.authrole

        ## FIX: this was guarded on self.authrole (copy-paste bug), which
        ## dropped 'authmethod' whenever authrole was unset and emitted
        ## 'authmethod': None when authrole was set without an authmethod
        if self.authmethod:
            details[u'authmethod'] = self.authmethod

        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce features that were explicitly set (non-None)
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## the optional human-readable message must be unicode when present
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
            message = details_message

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize a missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        ## reconstructed: this constructor was missing although parse() and
        ## marshal() depend on it; it mirrors Challenge.__init__ exactly
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra to an empty dict (as Challenge does)
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        obj = Authenticate(signature, extra)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## the optional human-readable message must be unicode when present
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
            message = details_message

        obj = Goodbye(reason, message)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message

        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        ## the discard payload is optional, hence two valid lengths
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]

        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))

        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]

        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))

        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the discard trailer is only sent when non-empty
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## log only the discard length, not its (potentially large) content
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
                     Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
                       Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## only request messages that expect a response may be answered with ERROR
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated for well-formedness but currently ignored
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no details are defined for ERROR yet, so an empty dict is sent
        details = {}

        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
"""
A WAMP ``PUBLISH`` message.
Formats:
* ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 16
"""
The WAMP message code for this type of message.
"""
def __init__(self,
             request,
             topic,
             args = None,
             kwargs = None,
             acknowledge = None,
             excludeMe = None,
             exclude = None,
             eligible = None,
             discloseMe = None):
    """
    :param request: The WAMP request ID of this request.
    :type request: int
    :param topic: The WAMP or application URI of the PubSub topic the event should
                  be published to.
    :type topic: unicode
    :param args: Positional values for application-defined event payload.
                 Must be serializable using any serializers in use.
    :type args: list or tuple or None
    :param kwargs: Keyword values for application-defined event payload.
                   Must be serializable using any serializers in use.
    :type kwargs: dict or None
    :param acknowledge: If True, acknowledge the publication with a success or
                        error response.
    :type acknowledge: bool or None
    :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
                      if he is subscribed (and eligible).
    :type excludeMe: bool or None
    :param exclude: List of WAMP session IDs to exclude from receiving this event.
    :type exclude: list of int or None
    :param eligible: List of WAMP session IDs eligible to receive this event.
    :type eligible: list of int or None
    :param discloseMe: If True, request to disclose the publisher of this event
                       to subscribers.
    :type discloseMe: bool or None
    """
    ## programming-error checks (protocol errors are raised in parse() instead)
    assert(type(request) in six.integer_types)
    assert(type(topic) == six.text_type)
    assert(args is None or type(args) in [list, tuple])
    assert(kwargs is None or type(kwargs) == dict)
    assert(acknowledge is None or type(acknowledge) == bool)
    assert(excludeMe is None or type(excludeMe) == bool)
    assert(exclude is None or type(exclude) == list)
    assert(eligible is None or type(eligible) == list)
    assert(discloseMe is None or type(discloseMe) == bool)

    Message.__init__(self)
    self.request = request
    self.topic = topic
    self.args = args
    self.kwargs = kwargs
    self.acknowledge = acknowledge
    self.excludeMe = excludeMe
    self.exclude = exclude
    self.eligible = eligible
    self.discloseMe = discloseMe
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
        ## PUBLISH has 3 mandatory fields after the type code, plus optional
        ## positional and keyword payload fields
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
        ## optional keyword payload (only valid if positional payload is present)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
        ## option defaults: None means "option not present in the message"
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None
        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge
        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe
        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            ## every entry must be a WAMP session ID (integer)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude
        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            ## every entry must be a WAMP session ID (integer)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)
        return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.acknowledge is not None:
options[u'acknowledge'] = self.acknowledge
if self.excludeMe is not None:
options[u'exclude_me'] = self.excludeMe
if self.exclude is not None:
options[u'exclude'] = self.exclude
if self.eligible is not None:
options[u'eligible'] = self.eligible
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
elif self.args:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
else:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a prior ``PUBLISH``.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """
    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        for value in (request, publication):
            assert(type(value) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
        ## both IDs are mandatory; arguments are validated left-to-right
        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """
    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """
    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'
    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription
           (one of ``MATCH_EXACT``, ``MATCH_PREFIX`` or ``MATCH_WILDCARD``).
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in (self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD))
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
        ## absent 'match' option defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match_value = options[u'match']
            if type(match_value) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match_value)))
            if match_value not in (Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD):
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match_value))
            match = match_value
        return Subscribe(request, topic, match)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## exact matching is the protocol default and need not be serialized
        options = {}
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a prior ``SUBSCRIBE``.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """
    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        for value in (request, subscription):
            assert(type(value) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
        ## both IDs are mandatory; arguments are validated left-to-right
        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """
    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        for value in (request, subscription):
            assert(type(value) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
        ## both IDs are mandatory; arguments are validated left-to-right
        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging a prior ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """
    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
        ## EVENT has 3 mandatory fields after the type code, plus optional payload
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
        ## optional keyword payload (only valid if positional payload is present)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
        ## publisher disclosure is optional; None means "not disclosed"
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher
        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher
        ## kwargs can only be serialized together with args (wire format is positional)
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """
    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
        ## CALL has 3 mandatory fields after the type code, plus optional payload
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
        ## optional positional call arguments
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
        ## optional keyword call arguments (only valid if positional are present)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
        ## 'timeout' must be a non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options actually set (non-None) are serialized
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        ## kwargs can only be serialized together with args (wire format is positional)
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """
    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """
    ## modes for canceling a call in progress
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'
    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode
        obj = Cancel(request, mode = mode)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: format string previously contained a stray quote ("mode = '{1}'')")
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
        ## RESULT has 2 mandatory fields after the type code, plus optional payload
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
        ## optional positional result payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
        ## optional keyword result payload (only valid if positional is present)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
        ## 'progress' detail marks this as a progressive (non-final) result
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress
        obj = Result(request, args = args, kwargs = kwargs, progress = progress)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress
        ## kwargs can only be serialized together with args (wire format is positional)
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """
    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            ## every partition key must be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        ## option defaults: None means "option not present in the message"
        pkeys = None
        discloseCaller = None
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            ## every partition key must be an integer
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller
        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options actually set (non-None) are serialized
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, acknowledging a prior ``REGISTER``.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """
    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        for value in (request, registration):
            assert(type(value) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
        ## both IDs are mandatory; arguments are validated left-to-right
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """
    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        for value in (request, registration):
            assert(type(value) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
        ## both IDs are mandatory; arguments are validated left-to-right
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the message has the wrong length or an invalid request ID.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
        if len(wmsg) != 2:
            ## FIX: error text previously said "UNREGISTER" (copy-paste from the
            ## Unregister class); this is the UNREGISTERED acknowledgement message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: previously logged as "WAMP UNREGISTER Message" — wrong message name
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.
    Formats:
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        ## eager development-time type guards (stripped under -O)
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        ## 4/5/6 elements correspond to the three wire formats documented on the class
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        ## optional keyword payload (only legal when positional payload is present)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        ## each detail below is optional; when present it is type-checked (and for
        ## 'timeout' also range-checked: must be a non-negative integer of ms)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only set details are put on the wire
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod
        ## NOTE(review): when kwargs is set but args is None, this emits None in
        ## the Arguments slot rather than an empty list — confirm intended
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 69

    ## the two interruption modes accepted for the 'mode' option
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        ## development-time guards (stripped under -O)
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## 'mode' option: optional, but must be one of the known modes when given
        mode = None
        if u'mode' in options:
            mode = options[u'mode']
            if type(mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode)))
            if mode not in (Interrupt.ABORT, Interrupt.KILL):
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode))

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only emit the 'mode' option when it was actually set
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP INTERRUPT Message (request = {0}, mode = '{1}')"
        return template.format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 70

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        ## development-time guards (stripped under -O)
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        ## 3/4/5 elements correspond to the three wire formats documented above
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional payload
        payload_args = None
        if len(wmsg) > 3:
            payload_args = wmsg[3]
            if type(payload_args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(payload_args)))

        ## optional keyword payload
        payload_kwargs = None
        if len(wmsg) > 4:
            payload_kwargs = wmsg[4]
            if type(payload_kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(payload_kwargs)))

        ## optional 'progress' flag
        progress = None
        if u'progress' in options:
            progress = options[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress)))

        return Yield(request, args = payload_args, kwargs = payload_kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## append payload elements only when present
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.append(self.args)
            msg.append(self.kwargs)
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})"
        return template.format(self.request, self.args, self.kwargs, self.progress)
|
def __init__(self, signature, extra = None):
"""
:param signature: The signature for the authentication challenge.
:type signature: unicode
:param extra: Authentication method specific information.
:type extra: dict or None
"""
assert(type(signature) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.signature = signature
self.extra = extra or {}
| 592
| 605
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Precompiled URI validation patterns. "strict" restricts components to
## [0-9a-z_] of length >= 2; "loose" only forbids whitespace, '.' and '#'
## inside a component. The *_NON_EMPTY variants reject empty components
## (i.e. consecutive dots).

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
## NOTE(review): only _URI_PAT_LOOSE is used by check_or_raise_uri below;
## the other patterns are presumably for callers elsewhere — confirm.
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check a value for being a valid WAMP URI; return it unchanged on success.

    :param value: The value to check.
    :param message: Context string prefixed to any error raised.
    :raises ProtocolError: if the value is not a unicode string or does not
       match the loose URI pattern.
    :returns: The validated value.
    """
    ## URIs must arrive as unicode strings
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    ## loose pattern: empty components allowed, no whitespace/'.'/'#' inside components
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check a value for being a valid WAMP ID; return it unchanged on success.

    :param value: The value to check.
    :param message: Context string prefixed to any error raised.
    :raises ProtocolError: if the value is not an integer in [0, 2**53].
    :returns: The validated value.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## IDs live in [0, 2**53] so they survive a round-trip through JSON doubles
    if not (0 <= value <= 9007199254740992): # 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check a value for being a valid WAMP extra/details/options dict; return it
    unchanged on success.

    :param value: The value to check.
    :param message: Context string prefixed to any error raised.
    :raises ProtocolError: if the value is not a dict or has a non-unicode key.
    :returns: The validated value.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    ## all keys must be unicode strings
    for k in value.keys():
        if type(k) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-serializer cache mapping ISerializer instances to serialized
        ## bytes, so repeated serialize() calls with the same serializer
        ## reuse the result
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        cache = self._serialized
        if serializer not in cache:
            ## first request with this serializer: marshal and remember
            cache[serializer] = serializer.serialize(self.marshal())
        return cache[serializer]

IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.
    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        ## eager development-time type guards (stripped under -O)
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        ## 'roles' detail is mandatory and must be a non-empty dict mapping
        ## role names (from ROLE_NAME_TO_CLASS) to per-role feature dicts
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            ## features are passed as keyword args into the role-features class
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        ## optional 'authmethods' detail: list of unicode auth method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        ## optional 'authid' detail
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only role features that were explicitly set (non-None
            ## public attributes of the role-features object)
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.
    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
        ## optional authentication details
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)
        ## 'roles' detail is mandatory and must be a non-empty dict mapping
        ## role names (from ROLE_NAME_TO_CLASS) to per-role feature dicts
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        ## consistency fix: use a unicode key like everywhere else in this module
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## BUG FIX: this previously tested `self.authrole`, so 'authmethod' was
        ## silently dropped when authrole was unset, and emitted as None when
        ## authrole was set but authmethod was not
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        ## announce only role features that were explicitly set (non-None
        ## public attributes of the role-features object)
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 3

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        ## development-time guards (stripped under -O)
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(message)))

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only emit the 'message' detail when it was actually set
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP ABORT Message (message = {0}, reason = {1})"
        return template.format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 4

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        ## development-time guards (stripped under -O)
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP CHALLENGE Message (method = {0}, extra = {1})"
        return template.format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 5

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        ## development-time guards (stripped under -O)
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})"
        return template.format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    ## The WAMP message code for this type of message.
    MESSAGE_TYPE = 6

    ## Default WAMP closing reason.
    DEFAULT_REASON = u"wamp.goodbye.normal"

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        ## development-time guards (stripped under -O)
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(message)))

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only emit the 'message' detail when it was actually set
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP GOODBYE Message (message = {0}, reason = {1})"
        return template.format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: For invalid message length, types or values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        ## optional trailing payload the peer will discard (e.g. padding)
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: added missing "=" after "incoming" so the repr matches the
        ## "name = value" convention used by every other message's __str__
        return "WAMP HEARTBEAT Message (incoming = {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message, reporting failure of a prior request.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: For invalid message length, types or values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## an ERROR may only refer to one of these request-style messages
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated for well-formedness but otherwise unused here
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        ## presence of kwargs forces args onto the wire too (wire format is positional)
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message, requesting publication of an event to a topic.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: For invalid message length, types or option values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## each known option is individually type-checked; unknown options are ignored
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            ## every entry must be a session ID (integer)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            ## every entry must be a session ID (integer)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options explicitly set (not None) are put on the wire
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## presence of kwargs forces args onto the wire too (wire format is positional)
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a ``PUBLISH`` request.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message, requesting subscription to a topic.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact unless a valid 'match' option is supplied
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            candidate = options[u'match']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(candidate)))
            if candidate not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(candidate))
            match = candidate

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only non-default matching policies are transmitted on the wire
        options = {}
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE`` request.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message, requesting removal of a subscription.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE`` request.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message, dispatching a published event to a subscriber.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: For invalid message length, types or values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## the publisher's session ID may be disclosed via details
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## presence of kwargs forces args onto the wire too (wire format is positional)
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message, requesting invocation of a registered procedure.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: For invalid message length, types or option values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## each known option is individually type-checked; unknown options are ignored
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0: # must be non-negative
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout

        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress

        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options explicitly set (not None) are put on the wire
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## presence of kwargs forces args onto the wire too (wire format is positional)
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message, requesting cancellation of an in-flight call.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## cancellation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: For invalid message length, types or values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: removed stray doubled apostrophe after the mode value ("'{1}''" -> "'{1}'")
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        Create a RESULT message carrying a call result (possibly progressive).

        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## optional 'progress' detail
        progress = None
        if u'progress' in details:
            progress = details[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(progress)))

        return Result(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## payload trailer is optional: kwargs implies args slot must be present
        msg = [Result.MESSAGE_TYPE, self.request, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            # every partition key must itself be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        pkeys = None
        discloseCaller = None
        # optional 'pkeys' option: list of integer partition keys
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys
        # optional 'disclose_caller' option: boolean flag
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller
        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only include options which were actually set
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        Create a REGISTERED message acknowledging a REGISTER request.

        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request, self.registration = request, registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## fixed: was "for WAMP UNREGISTER" — every other message type
            ## formats this error as "for <MESSAGE NAME>"
            raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        return Unregister(request, registration)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## fixed: messages previously said "UNREGISTER" although this is
            ## the UNREGISTERED message type
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        return Unregistered(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously printed "WAMP UNREGISTER Message" for this type
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        ## optional 'timeout' detail: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        ## optional 'receive_progress' detail: boolean flag
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        ## optional caller identity disclosure details
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include options which were actually set
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod
        ## payload trailer is optional; a kwargs payload requires the args slot
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## allowed values for the 'mode' option
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        Create an INTERRUPT message for an outstanding invocation.

        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request, self.mode = request, mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional 'mode' option: must be one of the known mode strings
        mode = None
        if u'mode' in options:
            mode = options[u'mode']
            if type(mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode)))
            if mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode))

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {} if self.mode is None else {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        Create a YIELD message carrying an invocation result (possibly progressive).

        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        ## optional 'progress' option
        progress = None
        if u'progress' in options:
            progress = options[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress)))

        return Yield(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## payload trailer is optional: kwargs implies args slot must be present
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
parse
|
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Pre-compiled URI validation patterns. Only _URI_PAT_LOOSE is used by the
## code visible in this module (check_or_raise_uri); the others are
## presumably imported by other modules — TODO confirm before removing.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check a value for being a valid (loose-rules) WAMP URI.

    :param value: The value to check.
    :param message: Context prefix used in the error message.

    :returns: The checked value, unchanged.
    :raises ProtocolError: if the value is not a text string or not a valid URI.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check a value for being a valid WAMP ID (an integer in [0, 2**53]).

    :param value: The value to check.
    :param message: Context prefix used in the error message.

    :returns: The checked value, unchanged.
    :raises ProtocolError: if the value is not an integer or out of range.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    if not (0 <= value <= 9007199254740992):  # 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check a value for being a valid WAMP extra dict (all keys must be text).

    :param value: The value to check.
    :param message: Context prefix used in the error message.

    :returns: The checked value, unchanged.
    :raises ProtocolError: if the value is not a dict or has a non-text key.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-serializer cache: maps ISerializer instance -> serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and cache per serializer
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        # 'roles' is mandatory: instantiate a RoleFeatures object per announced role,
        # passing any announced 'features' dict as keyword arguments
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        # optional 'authmethods' detail: list of text strings
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        # optional 'authid' detail: text string
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        # announce role features by reflecting over the role object's
        # public, non-None attributes (skipping the ROLE class attribute)
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
        # all authentication details are optional
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)
        # 'roles' is mandatory and must be a non-empty dict of known role names
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        # BUG FIX: this previously tested `self.authrole`, so 'authmethod' was
        # omitted when only authmethod was set, and a None authmethod could be
        # emitted when authrole was set. Test the attribute actually marshalled.
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        # announce features per role, same layout as parsed by Welcome.parse()
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
        # the human-readable message detail is optional
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(message)))
        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # include the optional message detail only when set
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        # normalize: a missing extra dict is stored as an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
"""
A WAMP ``AUTHENTICATE`` message.
Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
"""
MESSAGE_TYPE = 5
"""
The WAMP message code for this type of message.
"""
def __init__(self, signature, extra = None):
    """
    :param signature: The signature for the authentication challenge.
    :type signature: unicode
    :param extra: Authentication method specific information.
    :type extra: dict or None
    """
    assert(type(signature) == six.text_type)
    assert(extra is None or type(extra) == dict)
    Message.__init__(self)
    self.signature = signature
    # normalize: callers may pass None; store an empty dict instead so
    # marshal() always emits a dict for the Extra field
    self.extra = extra or {}
# MASKED: parse function (lines 608-633)
def marshal(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`

    :returns: The raw message list ``[AUTHENTICATE, Signature|string, Extra|dict]``.
    """
    return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]
def __str__(self):
    """
    Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`

    :returns: A human-readable one-line summary of this AUTHENTICATE message,
        intended for logging/debugging (not for wire serialization).
    """
    return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
        # the human-readable message detail is optional
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(message)))
        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # include the optional message detail only when set
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
        # the discard payload is only present in the 4-element variant
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.
    Formats:
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
        Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
        Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
        # only request messages that expect a reply may be answered with ERROR
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        # details are validated for well-formedness but otherwise ignored
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
        # positional payload is only present in the 6/7-element variants
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
        # keyword payload is only present in the 7-element variant
        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
        obj = Error(request_type, request, error, args = args, kwargs = kwargs)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # no ERROR details are currently defined, so this stays empty
        details = {}
        # a kwargs payload forces the args list to be present (possibly None);
        # otherwise trailing payload fields are dropped entirely
        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.
    Formats:
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """
    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
        be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
        Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
        Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
        error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
        if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
        to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
        # positional payload is only present in the 5/6-element variants
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
        # keyword payload is only present in the 6-element variant
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
        # each option is optional; absent options stay None
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None
        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge
        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe
        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            # every entry must be a WAMP session ID (integer)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude
        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            # every entry must be a WAMP session ID (integer)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only emit options that were explicitly set (None means "absent")
        options = {}
        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        # a kwargs payload forces the args list to be present (possibly None);
        # otherwise trailing payload fields are dropped entirely
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching modes
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
        # absent 'match' option defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match = options[u'match']
            if type(match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match)))
            if match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match))
        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # exact matching is the default and is therefore not sent on the wire
        options = {}
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError -- if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        if self.kwargs:
            ## FIX: the wire format requires Arguments|list whenever ArgumentsKw|dict
            ## is present -- fall back to an empty list instead of serializing None
            ## (which peers' parse() would reject as invalid type for 'args').
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args or [], self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError -- if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout

        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress

        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        if self.kwargs:
            ## FIX: Arguments|list is mandatory whenever ArgumentsKw|dict is present;
            ## emit an empty list instead of None so peers' parse() accepts the message.
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args or [], self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## valid cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError -- if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: removed stray doubled quote that previously rendered as "...'')"
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError -- if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        if self.kwargs:
            ## FIX: Arguments|list is mandatory whenever ArgumentsKw|dict is present;
            ## emit an empty list instead of None so peers' parse() accepts the message.
            return [Result.MESSAGE_TYPE, self.request, details, self.args or [], self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            ## every partition key must itself be an integer
            assert(all(type(pkey) in six.integer_types for pkey in pkeys))
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        if u'pkeys' in options:
            candidate = options[u'pkeys']
            if type(candidate) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(candidate)))
            for pk in candidate:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = candidate

        discloseCaller = None
        if u'disclose_caller' in options:
            flag = options[u'disclose_caller']
            if type(flag) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(flag)))
            discloseCaller = flag

        return Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## build the message skeleton, then fill in only the options actually set
        msg = [Register.MESSAGE_TYPE, self.request, {}, self.procedure]
        if self.pkeys is not None:
            msg[2][u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            msg[2][u'disclose_caller'] = self.discloseCaller
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = %s, procedure = %s, pkeys = %s, discloseCaller = %s)" % (self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Registered.MESSAGE_TYPE, self.request, self.registration]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = %s, registration = %s)" % (self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Unregister.MESSAGE_TYPE, self.request, self.registration]
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = %s, registration = %s)" % (self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError -- if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: error text previously mislabeled the message as "UNREGISTER"
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: label previously read "UNREGISTER" for an UNREGISTERED message
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError -- if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout

        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress

        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller

        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        if self.kwargs:
            ## FIX: Arguments|list is mandatory whenever ArgumentsKw|dict is present;
            ## emit an empty list instead of None so peers' parse() accepts the message.
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args or [], self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## valid interruption modes
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        opts = {} if self.mode is None else {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, opts]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = %s, mode = '%s')" % (self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError -- if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress

        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        if self.kwargs:
            ## FIX: Arguments|list is mandatory whenever ArgumentsKw|dict is present;
            ## emit an empty list instead of None so peers' parse() accepts the message.
            return [Yield.MESSAGE_TYPE, self.request, options, self.args or [], self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.

    :param wmsg: The unserialized raw message.
    :type wmsg: list

    :returns: An instance of this class.
    :raises: ProtocolError -- if the raw message is malformed.
    """
    ## this should already be verified by WampSerializer.unserialize
    ##
    assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
    if len(wmsg) != 3:
        raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
    ## 'signature' has no dedicated checker helper -- validate the type inline
    signature = wmsg[1]
    if type(signature) != six.text_type:
        raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
    extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
    obj = Authenticate(signature, extra)
    return obj
| 608
| 633
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Precompiled WAMP URI grammar patterns. "strict" restricts components to
## lowercase alphanumerics/underscore of length >= 2; "loose" only forbids
## whitespace, '.' and '#' inside a component. check_or_raise_uri below
## applies the loose variant.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate *value* as a WAMP URI and return it unchanged.

    :param value: The candidate URI.
    :param message: Context prefix used in error messages.

    :returns: *value*, when it is a unicode string matching the loose URI grammar.
    :raises: ProtocolError, otherwise.
    """
    ## exact-type check on purpose (matches the module's validation style)
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Validate *value* as a WAMP ID and return it unchanged.

    :param value: The candidate ID.
    :param message: Context prefix used in error messages.

    :returns: *value*, when it is an integer in ``[0, 2**53]``.
    :raises: ProtocolError, otherwise.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## IDs must fit losslessly into an IEEE double (2**53)
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate *value* as a WAMP "extra"/"details"/"options" dictionary and
    return it unchanged.

    :param value: The candidate dictionary.
    :param message: Context prefix used in error messages.

    :returns: *value*, when it is a dict whose keys are all unicode strings.
    :raises: ProtocolError, otherwise.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for k in value:
        if type(k) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## only serialize if not cached ..
        ## (cache key is the serializer instance itself)
        if not serializer in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]

## register Message as a virtual subclass of the IMessage ABC
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        roles = []

        ## 'roles' detail is mandatory and must be a non-empty dict mapping
        ## role names to (possibly empty) role-feature dicts
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)

        ## optional list of announced authentication methods
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods

        ## optional announced authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only role features that are explicitly set (non-None)
            ## and are not private/meta attributes
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## optional authentication details (absent => None)
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        roles = []

        ## 'roles' detail is mandatory and must be a non-empty dict mapping
        ## role names to (possibly empty) role-feature dicts
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## FIX: use the unicode key u'roles', consistent with the membership
        ## test above and the rest of this module
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## FIX: was "if self.authrole:" (copy-paste bug) -- that dropped
        ## 'authmethod' whenever authrole was unset and could emit
        ## authmethod = None whenever authrole was set
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only role features that are explicitly set (non-None)
            ## and are not private/meta attributes
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable message in details; membership test on
        ## purpose (a present-but-None value must raise, not be ignored)
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
            message = details_message

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable message in details
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
            message = details_message

        obj = Goodbye(reason, message)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message

        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]

        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))

        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]

        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))

        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        ## optional payload the peer will discard
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the 4-element form is only used when there is discard data
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## only message types that represent requests may be errored
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")

        ## details are validated but currently unused (hence the throwaway name)
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")

        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        ## optional positional payload
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        ## optional keyword payload (on the wire it can only follow args)
        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no details are currently emitted; the (empty) dict is still a
        ## mandatory wire field
        details = {}

        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        ## optional keyword payload (on the wire it can only follow args)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## each recognized option is validated individually; absent options
        ## stay None (note the wire names use snake_case, attributes camelCase)
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## emit only options that are explicitly set (not None)
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## args/kwargs are trailing optional fields: kwargs can only be sent
        ## together with args, so emit the longest applicable form
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        ## both wire fields are WAMP IDs (arguments evaluate left-to-right,
        ## so 'request' is validated before 'publication')
        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.
    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies a subscription may request
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
        ## 'match' option is optional and defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            candidate = options[u'match']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(candidate)))
            if candidate not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(candidate))
            match = candidate
        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the default (exact) match policy is not sent on the wire
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options = {u'match': self.match}
        else:
            options = {}
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.
    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.
    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.
    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.
    Formats:
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        ## asserts (not ProtocolError): constructor args are locally produced
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message does not conform to the EVENT format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
        ## 4 = no payload, 5 = args only, 6 = args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
        ## optional 'publisher' detail (publisher disclosure)
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher
        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher
        ## kwargs implies args slot must be present too (even if None)
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.
    Formats:
    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## asserts (not ProtocolError): constructor args are locally produced
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message does not conform to the CALL format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
        ## 4 = no payload, 5 = args only, 6 = args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
        ## optional 'timeout' option: non-negative integer (ms)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout
        ## optional 'receive_progress' option
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress
        ## optional 'disclose_me' option
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options actually set are serialized
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        ## kwargs implies args slot must be present too (even if None)
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.
    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## valid cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        ## asserts (not ProtocolError): constructor args are locally produced
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message does not conform to the CANCEL format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode
        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: format string previously contained a stray extra quote,
        ## rendering e.g. "mode = 'kill''" instead of "mode = 'kill'"
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.
    Formats:
    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        ## asserts (not ProtocolError): constructor args are locally produced
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message does not conform to the RESULT format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
        ## 3 = no payload, 4 = args only, 5 = args + kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
        ## optional 'progress' detail (progressive call results)
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress
        obj = Result(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress
        ## kwargs implies args slot must be present too (even if None)
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.
    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        ## asserts (not ProtocolError): constructor args are locally produced
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message does not conform to the REGISTER format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        pkeys = None
        discloseCaller = None
        ## optional 'pkeys' option: list of integer partition keys
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys
        ## optional 'disclose_caller' option
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller
        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options actually set are serialized
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.
    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.
    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.
    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.
        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message does not conform to the UNREGISTERED format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
        if len(wmsg) != 2:
            ## fixed copy-paste error: messages referred to "UNREGISTER",
            ## but this is the UNREGISTERED message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously printed "WAMP UNREGISTER Message" for an UNREGISTERED message
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
"""
A WAMP ``INVOCATION`` message.
Formats:
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 68
"""
The WAMP message code for this type of message.
"""
def __init__(self,
             request,
             registration,
             args = None,
             kwargs = None,
             timeout = None,
             receive_progress = None,
             caller = None,
             authid = None,
             authrole = None,
             authmethod = None):
    """
    :param request: The WAMP request ID of this request.
    :type request: int
    :param registration: The registration ID of the endpoint to be invoked.
    :type registration: int
    :param args: Positional values for application-defined event payload.
       Must be serializable using any serializers in use.
    :type args: list or tuple or None
    :param kwargs: Keyword values for application-defined event payload.
       Must be serializable using any serializers in use.
    :type kwargs: dict or None
    :param timeout: If present, let the callee automatically cancels
       the invocation after this ms.
    :type timeout: int or None
    :param receive_progress: Indicates if the callee should produce progressive results.
    :type receive_progress: bool or None
    :param caller: The WAMP session ID of the caller.
    :type caller: int or None
    :param authid: The authentication ID of the caller.
    :type authid: unicode or None
    :param authrole: The authentication role of the caller.
    :type authrole: unicode or None
    :param authmethod: The authentication method under which the caller was authenticated.
    :type authmethod: unicode or None
    """
    ## asserts (not ProtocolError): constructor args are locally produced
    assert(type(request) in six.integer_types)
    assert(type(registration) in six.integer_types)
    assert(args is None or type(args) in [list, tuple])
    assert(kwargs is None or type(kwargs) == dict)
    assert(timeout is None or type(timeout) in six.integer_types)
    assert(receive_progress is None or type(receive_progress) == bool)
    assert(caller is None or type(caller) in six.integer_types)
    assert(authid is None or type(authid) == six.text_type)
    assert(authrole is None or type(authrole) == six.text_type)
    assert(authmethod is None or type(authmethod) == six.text_type)
    Message.__init__(self)
    self.request = request
    self.registration = registration
    self.args = args
    self.kwargs = kwargs
    self.timeout = timeout
    self.receive_progress = receive_progress
    ## caller disclosure details (only present when disclosure is active)
    self.caller = caller
    self.authid = authid
    self.authrole = authrole
    self.authmethod = authmethod
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message does not conform to the INVOCATION format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        ## 4 fixed fields, plus optional positional args and optional keyword args
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        ## optional positional call payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        ## optional keyword call payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        ## optional 'timeout' detail: non-negative integer (ms)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        ## optional 'receive_progress' detail: bool
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        ## optional caller-disclosure details: caller session ID, authid, authrole, authmethod
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.caller is not None:
options[u'caller'] = self.caller
if self.authid is not None:
options[u'authid'] = self.authid
if self.authrole is not None:
options[u'authrole'] = self.authrole
if self.authmethod is not None:
options[u'authmethod'] = self.authmethod
if self.kwargs:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
elif self.args:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
else:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """
    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """
    ABORT = u'abort'
    KILL = u'kill'
    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        if mode is not None:
            assert(type(mode) == six.text_type)
            assert(mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
        ## optional 'mode' option: must be one of the two well-known interrupt modes
        mode = None
        if u'mode' in options:
            mode_value = options[u'mode']
            if type(mode_value) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode_value)))
            if mode_value not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode_value))
            mode = mode_value
        return Interrupt(request, mode = mode)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message — the callee's (possibly progressive) result for an INVOCATION.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """
    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message does not conform to the YIELD format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
        ## 3 fixed fields plus optional args list and kwargs dict
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
        ## optional positional result payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        ## optional keyword result payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
        ## optional 'progress' option: marks this as a progressive (non-final) result
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress
        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress
        ## payload fields are trailing and optional; kwargs requires the args slot too
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
__init__
|
:param reason: Optional WAMP or application error URI for closing reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## explicit public API of this module: the message base class plus one class
## per WAMP message type defined by the WAMP specification
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## WAMP URIs are dot-separated component strings. "Strict" patterns restrict
## components to [0-9a-z_] with length >= 2; "loose" patterns only exclude
## whitespace, '.' and '#'. "NON_EMPTY" variants forbid empty components.
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate *value* as a WAMP URI (loose pattern) and return it unchanged.

    :raises: ProtocolError, prefixed with *message*, on wrong type or invalid URI.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    matched = _URI_PAT_LOOSE.match(value)
    if matched is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Validate *value* as a WAMP ID and return it unchanged.

    :raises: ProtocolError, prefixed with *message*, on wrong type or out-of-range value.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs are integers in [0, 2**53] (exactly representable in IEEE doubles)
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate *value* as a WAMP "extra" dict (all keys unicode) and return it unchanged.

    :raises: ProtocolError, prefixed with *message*, on wrong type or non-unicode key.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value.keys():
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """
    def __init__(self):
        ## serialization cache: maps ISerializer instance -> serialized bytes
        self._serialized = {}
    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop any cached serializations (e.g. after the message changed)
        self._serialized = {}
    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and memoize per serializer instance
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data
## register Message as a virtual subclass of the IMessage ABC
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message — sent by a client to open a session on a realm.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """
    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message does not conform to the HELLO format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        ## 'roles' is mandatory and must be a non-empty dict mapping role name -> role details
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            ## instantiate the per-role features class, with announced features if present
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        ## optional list of authentication methods offered by the client
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        ## optional authentication ID the client wants to authenticate as
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only features explicitly set (non-None) on the role object
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message — sent by a router to accept a session.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """
    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message does not conform to the WELCOME format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
        ## optional authentication info (passed through; type-checked in __init__ asserts only)
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)
        ## 'roles' is mandatory and must be a non-empty dict mapping role name -> role details
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            ## instantiate the per-role features class, with announced features if present
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
           u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## FIX: was gated on self.authrole, which dropped 'authmethod' when no
        ## authrole was set and wrote authmethod = None when authrole was set alone
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only features explicitly set (non-None) on the role object
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """
    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            msg_text = details[u'message']
            if type(msg_text) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(msg_text)))
            message = msg_text
        return Abort(reason, message)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """
    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize missing extra to an empty dict
        self.extra = extra or {}
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
        return Challenge(method, extra)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """
    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize missing extra to an empty dict
        self.extra = extra or {}
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
        return Authenticate(signature, extra)
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """
    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """
    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """
    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message does not conform to the GOODBYE format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
            message = details_message
        obj = Goodbye(reason, message)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Goodbye.MESSAGE_TYPE, details, self.reason]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """
    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """
    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message does not conform to the HEARTBEAT format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
        ## 'incoming' may be 0 (no heartbeat from peer processed yet)
        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
        ## 'outgoing' is a 1-based sequence number, hence strictly positive
        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
        ## optional payload string the peer simply discards
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
        obj = Heartbeat(incoming, outgoing, discard = discard)
        return obj
    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## 'discard' is a trailing optional field
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError, if the raw message is not a valid ERROR message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        # an ERROR may only reference request types that can actually fail
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        # details are validated but currently unused beyond the shape check
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        # optional positional error payload (must be a list on the wire)
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        # optional keyword error payload (must be a dict on the wire)
        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        # use the class-qualified message code (Error.MESSAGE_TYPE) for
        # consistency with every other message class in this module
        if self.kwargs:
            return [Error.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [Error.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [Error.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError, if the raw message is not a valid PUBLISH message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        # optional positional event payload (must be a list on the wire)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        # optional keyword event payload (must be a dict on the wire)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        # each publish option is optional; every option present is validated
        # strictly by type before being accepted
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))

            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))

            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))

            # every excluded session ID must itself be an integer
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))

            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))

            # every eligible session ID must itself be an integer
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))

            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options explicitly set (non-None) are put on the wire
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        # shortest wire form: trailing args/kwargs are omitted when absent
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert type(request) in six.integer_types
        assert type(publication) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        # the message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        return Published(
            check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
            check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    # topic matching policies supported by the protocol
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert type(request) in six.integer_types
        assert type(topic) == six.text_type
        assert match is None or type(match) == six.text_type
        assert match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD]

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        # the message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        # fall back to exact matching unless a valid 'match' option is present
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            candidate = options[u'match']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(candidate)))
            if candidate not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(candidate))
            match = candidate

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        # 'exact' is the protocol default and is therefore omitted on the wire
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        # the message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(
            check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
            check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        # the message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(
            check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
            check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert type(request) in six.integer_types

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        # the message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError, if the raw message is not a valid EVENT message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        # optional positional event payload (must be a list on the wire)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        # optional keyword event payload (must be a dict on the wire)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        # optional publisher disclosure, carried inside the details dict
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))

            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        # shortest wire form: trailing args/kwargs are omitted when absent
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError, if the raw message is not a valid CALL message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        # optional positional call arguments (must be a list on the wire)
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        # optional keyword call arguments (must be a dict on the wire)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        # 'timeout' option must be a non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))

            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))

            timeout = option_timeout

        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))

            receive_progress = option_receive_progress

        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options explicitly set (non-None) are put on the wire
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        # shortest wire form: trailing args/kwargs are omitted when absent
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    # cancellation modes defined by the protocol
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError, if the raw message is not a valid CANCEL message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        # 'mode' is optional, but when present must be one of the known modes
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))

            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))

            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        # fixed: format string previously contained a stray quote ("...'{1}'')")
        # which rendered e.g. "mode = 'kill'')"
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError, if the raw message is not a valid RESULT message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        # optional positional result payload (must be a list on the wire)
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        # optional keyword result payload (must be a dict on the wire)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        # 'progress' detail flags a progressive (non-final) result
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))

            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.progress is not None:
            details[u'progress'] = self.progress

        # shortest wire form: trailing args/kwargs are omitted when absent
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            # each partition key must itself be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError, if the raw message is not a valid REGISTER message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        # 'pkeys' option must be a list of integers when present
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))

            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

            pkeys = option_pkeys

        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))

            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options explicitly set (non-None) are put on the wire
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.
    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.
    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.
    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
        if len(wmsg) != 2:
            # FIX: diagnostics previously said "UNREGISTER"; this is an UNREGISTERED message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
        obj = Unregistered(request)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        # FIX: label previously said "UNREGISTER" for this UNREGISTERED message type
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.
    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        # 4, 5 or 6 elements depending on presence of args / args+kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        # optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        # optional keyword payload (only allowed when args present)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        # each detail below is optional; when present it is type-checked,
        # then copied verbatim into the corresponding constructor argument
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only details that were actually set are serialized
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod
        # trailing payload elements are only emitted when non-empty
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.
    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two interruption modes defined by the protocol
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
        ## optional 'mode' option: must be one of the two known modes
        ##
        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate
        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {} if self.mode is None else {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.
    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
        ## optional positional / keyword payloads
        args = wmsg[3] if len(wmsg) > 3 else None
        if args is not None and type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        kwargs = wmsg[4] if len(wmsg) > 4 else None
        if kwargs is not None and type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
        ## optional 'progress' flag
        progress = None
        if u'progress' in options:
            candidate = options[u'progress']
            if type(candidate) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(candidate)))
            progress = candidate
        return Yield(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
def __init__(self, reason = DEFAULT_REASON, message = None):
"""
:param reason: Optional WAMP or application error URI for closing reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
| 669
| 682
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """Validate *value* as a (loose) WAMP URI and return it, else raise ProtocolError."""
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """Validate *value* as a WAMP ID (integer in [0, 2**53]) and return it, else raise ProtocolError."""
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    # WAMP IDs live in the range representable exactly by IEEE doubles: [0, 2**53]
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """Validate *value* as a WAMP extra/details dict (text keys only) and return it, else raise ProtocolError."""
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value.keys():
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-serializer cache: ISerializer instance -> serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations (e.g. after mutating the message)
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and memoize per serializer
        if serializer not in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.
    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        # 'roles' is the only mandatory detail; each role name maps to a
        # features dict and is rebuilt into a RoleFeatures instance
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        # optional 'authmethods': a list of text authentication method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        # optional 'authid': a text authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            # serialize every set (non-None), non-private feature attribute of the role
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.
    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError if the raw message is malformed.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
        # NOTE(review): these auth details are passed through untyped —
        # unlike HELLO.parse, no text-type check is performed here
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)
        # 'roles' is mandatory; each entry is rebuilt into a RoleFeatures instance
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## FIX: previously guarded on self.authrole (copy-paste bug), which
        ## dropped 'authmethod' when only authmethod was set and emitted
        ## 'authmethod': None when only authrole was set
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            # serialize every set (non-None), non-private feature attribute of the role
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.
    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            candidate = details[u'message']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(candidate)))
            message = candidate
        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
   """
   A WAMP ``CHALLENGE`` message, sent by a router to demand authentication.

   Format: ``[CHALLENGE, Method|string, Extra|dict]``
   """

   MESSAGE_TYPE = 4
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, method, extra = None):
      """
      :param method: The authentication method.
      :type method: unicode
      :param extra: Authentication method specific information.
      :type extra: dict or None
      """
      assert type(method) == six.text_type
      assert extra is None or type(extra) == dict

      Message.__init__(self)
      self.method = method
      self.extra = extra or {}

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

      auth_method = wmsg[1]
      if type(auth_method) != six.text_type:
         raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(auth_method)))

      extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

      return Challenge(auth_method, extra)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Challenge.MESSAGE_TYPE, self.method, self.extra]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
   """
   A WAMP ``AUTHENTICATE`` message, sent by a client in response to a challenge.

   Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
   """

   MESSAGE_TYPE = 5
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, signature, extra = None):
      """
      :param signature: The signature for the authentication challenge.
      :type signature: unicode
      :param extra: Authentication method specific information.
      :type extra: dict or None
      """
      assert type(signature) == six.text_type
      assert extra is None or type(extra) == dict

      Message.__init__(self)
      self.signature = signature
      self.extra = extra or {}

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

      signature = wmsg[1]
      if type(signature) != six.text_type:
         raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

      extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

      return Authenticate(signature, extra)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
   """
   A WAMP ``GOODBYE`` message, used for the session closing handshake.

   Format: ``[GOODBYE, Details|dict, Reason|uri]``
   """

   MESSAGE_TYPE = 6
   """
   The WAMP message code for this type of message.
   """

   DEFAULT_REASON = u"wamp.goodbye.normal"
   """
   Default WAMP closing reason.
   """

   def __init__(self, reason = DEFAULT_REASON, message = None):
      """
      :param reason: Optional WAMP or application error URI for closing reason.
      :type reason: unicode
      :param message: Optional human-readable closing message, e.g. for logging purposes.
      :type message: unicode or None
      """
      assert type(reason) == six.text_type
      assert message is None or type(message) == six.text_type

      Message.__init__(self)
      self.reason = reason
      self.message = message

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

      details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
      reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

      ## 'message' is an optional detail; when present it must be text
      message = None
      if u'message' in details:
         raw_message = details[u'message']
         if type(raw_message) != six.text_type:
            raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(raw_message)))
         message = raw_message

      return Goodbye(reason, message)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {u'message': self.message} if self.message else {}
      return [Goodbye.MESSAGE_TYPE, details, self.reason]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
   """
   A WAMP ``HEARTBEAT`` message.

   Formats:

   * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
   * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
   """

   MESSAGE_TYPE = 7
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, incoming, outgoing, discard = None):
      """
      :param incoming: Last incoming heartbeat processed from peer.
      :type incoming: int
      :param outgoing: Outgoing heartbeat.
      :type outgoing: int
      :param discard: Optional data that is discarded by peer.
      :type discard: unicode or None
      """
      assert type(incoming) in six.integer_types
      assert type(outgoing) in six.integer_types
      assert discard is None or type(discard) == six.text_type

      Message.__init__(self)
      self.incoming = incoming
      self.outgoing = outgoing
      self.discard = discard

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

      if len(wmsg) not in [3, 4]:
         raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

      ## 'incoming' must be a non-negative integer
      incoming = wmsg[1]
      if type(incoming) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
      if incoming < 0:
         raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

      ## 'outgoing' must be a strictly positive integer
      outgoing = wmsg[2]
      if type(outgoing) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
      if outgoing <= 0:
         raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

      ## optional trailing discard payload must be text when present
      discard = None
      if len(wmsg) > 3:
         discard = wmsg[3]
         if type(discard) != six.text_type:
            raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

      return Heartbeat(incoming, outgoing, discard = discard)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## the discard element is only included when non-empty
      msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
      if self.discard:
         msg.append(self.discard)
      return msg

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
   """
   A WAMP ``ERROR`` message.

   Formats:

   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
   """

   MESSAGE_TYPE = 8
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request_type, request, error, args = None, kwargs = None):
      """
      :param request_type: The WAMP message type code for the original request.
      :type request_type: int
      :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
      :type request: int
      :param error: The WAMP or application error URI for the error that occurred.
      :type error: unicode
      :param args: Positional values for application-defined exception.
         Must be serializable using any serializers in use.
      :type args: list or None
      :param kwargs: Keyword values for application-defined exception.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      """
      assert(type(request_type) in six.integer_types)
      assert(type(request) in six.integer_types)
      assert(type(error) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)

      Message.__init__(self)
      self.request_type = request_type
      self.request = request
      self.error = error
      self.args = args
      self.kwargs = kwargs

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

      if len(wmsg) not in (5, 6, 7):
         raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

      ## the referenced request type must be one of the request-bearing messages
      request_type = wmsg[1]
      if type(request_type) not in six.integer_types:
         raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
      if request_type not in [Subscribe.MESSAGE_TYPE,
                              Unsubscribe.MESSAGE_TYPE,
                              Publish.MESSAGE_TYPE,
                              Register.MESSAGE_TYPE,
                              Unregister.MESSAGE_TYPE,
                              Call.MESSAGE_TYPE,
                              Invocation.MESSAGE_TYPE]:
         raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

      request = check_or_raise_id(wmsg[2], "'request' in ERROR")
      ## details are validated but currently carry no recognized keys
      _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
      error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

      args = None
      if len(wmsg) > 5:
         args = wmsg[5]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

      kwargs = None
      if len(wmsg) > 6:
         kwargs = wmsg[6]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

      obj = Error(request_type, request, error, args = args, kwargs = kwargs)

      return obj

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {}

      if self.kwargs:
         ## FIX: when only keyword payload is present, 'Arguments|list' must
         ## still be a list on the wire - self.args may be None here, which
         ## this class's own parse() would reject as invalid.
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args or [], self.kwargs]
      elif self.args:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
      else:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
   """
   A WAMP ``PUBLISH`` message.

   Formats:

   * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
   """

   MESSAGE_TYPE = 16
   """
   The WAMP message code for this type of message.
   """

   def __init__(self,
                request,
                topic,
                args = None,
                kwargs = None,
                acknowledge = None,
                excludeMe = None,
                exclude = None,
                eligible = None,
                discloseMe = None):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param topic: The WAMP or application URI of the PubSub topic the event should
         be published to.
      :type topic: unicode
      :param args: Positional values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined event payload.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param acknowledge: If True, acknowledge the publication with a success or
         error response.
      :type acknowledge: bool or None
      :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
         if he is subscribed (and eligible).
      :type excludeMe: bool or None
      :param exclude: List of WAMP session IDs to exclude from receiving this event.
      :type exclude: list of int or None
      :param eligible: List of WAMP session IDs eligible to receive this event.
      :type eligible: list of int or None
      :param discloseMe: If True, request to disclose the publisher of this event
         to subscribers.
      :type discloseMe: bool or None
      """
      assert(type(request) in six.integer_types)
      assert(type(topic) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      assert(acknowledge is None or type(acknowledge) == bool)
      assert(excludeMe is None or type(excludeMe) == bool)
      assert(exclude is None or type(exclude) == list)
      assert(eligible is None or type(eligible) == list)
      assert(discloseMe is None or type(discloseMe) == bool)

      Message.__init__(self)
      self.request = request
      self.topic = topic
      self.args = args
      self.kwargs = kwargs
      self.acknowledge = acknowledge
      self.excludeMe = excludeMe
      self.exclude = exclude
      self.eligible = eligible
      self.discloseMe = discloseMe

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
      options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
      topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

      ## each option is optional; when present, its type (and for the session
      ## ID lists, each element's type) is validated before acceptance
      acknowledge = None
      excludeMe = None
      exclude = None
      eligible = None
      discloseMe = None

      if u'acknowledge' in options:
         option_acknowledge = options[u'acknowledge']
         if type(option_acknowledge) != bool:
            raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
         acknowledge = option_acknowledge

      if u'exclude_me' in options:
         option_excludeMe = options[u'exclude_me']
         if type(option_excludeMe) != bool:
            raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
         excludeMe = option_excludeMe

      if u'exclude' in options:
         option_exclude = options[u'exclude']
         if type(option_exclude) != list:
            raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
         for sessionId in option_exclude:
            if type(sessionId) not in six.integer_types:
               raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
         exclude = option_exclude

      if u'eligible' in options:
         option_eligible = options[u'eligible']
         if type(option_eligible) != list:
            raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
         for sessionId in option_eligible:
            if type(sessionId) not in six.integer_types:
               raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
         eligible = option_eligible

      if u'disclose_me' in options:
         option_discloseMe = options[u'disclose_me']
         if type(option_discloseMe) != bool:
            raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
         discloseMe = option_discloseMe

      obj = Publish(request,
                    topic,
                    args = args,
                    kwargs = kwargs,
                    acknowledge = acknowledge,
                    excludeMe = excludeMe,
                    exclude = exclude,
                    eligible = eligible,
                    discloseMe = discloseMe)

      return obj

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      options = {}

      if self.acknowledge is not None:
         options[u'acknowledge'] = self.acknowledge
      if self.excludeMe is not None:
         options[u'exclude_me'] = self.excludeMe
      if self.exclude is not None:
         options[u'exclude'] = self.exclude
      if self.eligible is not None:
         options[u'eligible'] = self.eligible
      if self.discloseMe is not None:
         options[u'disclose_me'] = self.discloseMe

      if self.kwargs:
         ## FIX: when only keyword payload is present, 'Arguments|list' must
         ## still be a list on the wire - self.args may be None here, which
         ## this class's own parse() would reject as invalid.
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args or [], self.kwargs]
      elif self.args:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
      else:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
   """
   A WAMP ``PUBLISHED`` message, acknowledging a ``PUBLISH``.

   Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
   """

   MESSAGE_TYPE = 17
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, publication):
      """
      :param request: The request ID of the original `PUBLISH` request.
      :type request: int
      :param publication: The publication ID for the published event.
      :type publication: int
      """
      assert type(request) in six.integer_types
      assert type(publication) in six.integer_types

      Message.__init__(self)
      self.request = request
      self.publication = publication

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

      return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                       check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Published.MESSAGE_TYPE, self.request, self.publication]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
   """
   A WAMP ``SUBSCRIBE`` message.

   Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
   """

   MESSAGE_TYPE = 32
   """
   The WAMP message code for this type of message.
   """

   ## topic matching policies
   MATCH_EXACT = u'exact'
   MATCH_PREFIX = u'prefix'
   MATCH_WILDCARD = u'wildcard'

   def __init__(self, request, topic, match = MATCH_EXACT):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
      :type topic: unicode
      :param match: The topic matching method to be used for the subscription.
      :type match: unicode
      """
      assert type(request) in six.integer_types
      assert type(topic) == six.text_type
      assert match is None or type(match) == six.text_type
      assert match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD]

      Message.__init__(self)
      self.request = request
      self.topic = topic
      self.match = match

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

      if len(wmsg) != 4:
         raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

      request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
      options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
      topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

      ## matching policy defaults to exact; an explicit option must be a
      ## text value from the known set
      match = Subscribe.MATCH_EXACT
      if u'match' in options:
         option_match = options[u'match']
         if type(option_match) != six.text_type:
            raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
         if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
            raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
         match = option_match

      return Subscribe(request, topic, match)

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## the default (exact) matching policy is not sent on the wire
      if self.match and self.match != Subscribe.MATCH_EXACT:
         options = {u'match': self.match}
      else:
         options = {}
      return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
   """
   A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE``.

   Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
   """

   MESSAGE_TYPE = 33
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, subscription):
      """
      :param request: The request ID of the original ``SUBSCRIBE`` request.
      :type request: int
      :param subscription: The subscription ID for the subscribed topic (or topic pattern).
      :type subscription: int
      """
      assert type(request) in six.integer_types
      assert type(subscription) in six.integer_types

      Message.__init__(self)
      self.request = request
      self.subscription = subscription

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

      return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                        check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
   """
   A WAMP ``UNSUBSCRIBE`` message.

   Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
   """

   MESSAGE_TYPE = 34
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request, subscription):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param subscription: The subscription ID for the subscription to unsubscribe from.
      :type subscription: int
      """
      assert type(request) in six.integer_types
      assert type(subscription) in six.integer_types

      Message.__init__(self)
      self.request = request
      self.subscription = subscription

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

      return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                         check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
   """
   A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE``.

   Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
   """

   MESSAGE_TYPE = 35
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, request):
      """
      :param request: The request ID of the original ``UNSUBSCRIBE`` request.
      :type request: int
      """
      assert type(request) in six.integer_types

      Message.__init__(self)
      self.request = request

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

      if len(wmsg) != 2:
         raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

      return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Unsubscribed.MESSAGE_TYPE, self.request]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
   """
   A WAMP ``EVENT`` message.

   Formats:

   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
   """

   MESSAGE_TYPE = 36
   """
   The WAMP message code for this type of message.
   """

   def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
      """
      :param subscription: The subscription ID this event is dispatched under.
      :type subscription: int
      :param publication: The publication ID of the dispatched event.
      :type publication: int
      :param args: Positional values for application-defined exception.
         Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined exception.
         Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param publisher: If present, the WAMP session ID of the publisher of this event.
      :type publisher: int or None
      """
      assert(type(subscription) in six.integer_types)
      assert(type(publication) in six.integer_types)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      assert(publisher is None or type(publisher) in six.integer_types)

      Message.__init__(self)
      self.subscription = subscription
      self.publication = publication
      self.args = args
      self.kwargs = kwargs
      self.publisher = publisher

   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.

      :param wmsg: The unserialized raw message.
      :type wmsg: list

      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

      subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
      publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
      details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

      ## 'publisher' is an optional detail; when present it must be an integer
      ## session ID
      publisher = None
      if u'publisher' in details:
         detail_publisher = details[u'publisher']
         if type(detail_publisher) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
         publisher = detail_publisher

      obj = Event(subscription,
                  publication,
                  args = args,
                  kwargs = kwargs,
                  publisher = publisher)

      return obj

   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {}
      if self.publisher is not None:
         details[u'publisher'] = self.publisher

      if self.kwargs:
         ## FIX: when only keyword payload is present, 'Arguments|list' must
         ## still be a list on the wire - self.args may be None here, which
         ## this class's own parse() would reject as invalid.
         return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args or [], self.kwargs]
      elif self.args:
         return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
      else:
         return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Wire formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args=None,
                 kwargs=None,
                 timeout=None,
                 receive_progress=None,
                 discloseMe=None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## validate argument types up front so malformed payloads fail loudly
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        timeout = None
        if u'timeout' in options:
            timeout_opt = options[u'timeout']
            if type(timeout_opt) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(timeout_opt)))
            if timeout_opt < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(timeout_opt))
            timeout = timeout_opt

        receive_progress = None
        if u'receive_progress' in options:
            progress_opt = options[u'receive_progress']
            if type(progress_opt) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(progress_opt)))
            receive_progress = progress_opt

        discloseMe = None
        if u'disclose_me' in options:
            disclose_opt = options[u'disclose_me']
            if type(disclose_opt) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(disclose_opt)))
            discloseMe = disclose_opt

        return Call(request,
                    procedure,
                    args=args,
                    kwargs=kwargs,
                    timeout=timeout,
                    receive_progress=receive_progress,
                    discloseMe=discloseMe)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## args/kwargs are trailing optional elements; kwargs requires args slot
        msg = [Call.MESSAGE_TYPE, self.request, options, self.procedure]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## permissible cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode=None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        return Cancel(request, mode=mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`

        FIX: removed the stray trailing quote that rendered as e.g. "mode = 'kill''".
        """
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Wire formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args=None, kwargs=None, progress=None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        progress = None
        if u'progress' in details:
            progress_detail = details[u'progress']
            if type(progress_detail) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(progress_detail)))
            progress = progress_detail

        return Result(request, args=args, kwargs=kwargs, progress=progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## args/kwargs are trailing optional elements; kwargs requires args slot
        msg = [Result.MESSAGE_TYPE, self.request, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys=None, discloseCaller=None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    ## FIX: message previously read "invalid type for value '{0}'" while
                    ## interpolating the *type* of the value, not the value itself
                    raise ProtocolError("invalid type {0} for value in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys

        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller

        return Register(request, procedure, pkeys=pkeys, discloseCaller=discloseCaller)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP ``UNREGISTER`` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## FIX: was "for WAMP UNREGISTER" — inconsistent with the "for <TYPE>"
            ## convention used by every other message class in this module
            raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        return Unregister(request, registration)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: error messages previously said "UNREGISTER" although this
            ## parses an UNREGISTERED message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        return Unregistered(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`

        FIX: previously printed "WAMP UNREGISTER Message" for an UNREGISTERED message.
        """
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Wire formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args=None,
                 kwargs=None,
                 timeout=None,
                 receive_progress=None,
                 caller=None,
                 authid=None,
                 authrole=None,
                 authmethod=None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        ## validate argument types up front so malformed payloads fail loudly
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        timeout = None
        if u'timeout' in details:
            timeout_detail = details[u'timeout']
            if type(timeout_detail) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(timeout_detail)))
            if timeout_detail < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(timeout_detail))
            timeout = timeout_detail

        receive_progress = None
        if u'receive_progress' in details:
            progress_detail = details[u'receive_progress']
            if type(progress_detail) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(progress_detail)))
            receive_progress = progress_detail

        caller = None
        if u'caller' in details:
            caller_detail = details[u'caller']
            if type(caller_detail) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(caller_detail)))
            caller = caller_detail

        authid = None
        if u'authid' in details:
            authid_detail = details[u'authid']
            if type(authid_detail) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(authid_detail)))
            authid = authid_detail

        authrole = None
        if u'authrole' in details:
            authrole_detail = details[u'authrole']
            if type(authrole_detail) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(authrole_detail)))
            authrole = authrole_detail

        authmethod = None
        if u'authmethod' in details:
            authmethod_detail = details[u'authmethod']
            if type(authmethod_detail) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(authmethod_detail)))
            authmethod = authmethod_detail

        return Invocation(request,
                          registration,
                          args=args,
                          kwargs=kwargs,
                          timeout=timeout,
                          receive_progress=receive_progress,
                          caller=caller,
                          authid=authid,
                          authrole=authrole,
                          authmethod=authmethod)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## args/kwargs are trailing optional elements; kwargs requires args slot
        msg = [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## permissible interruption modes
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode=None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## options
        ##
        mode = None
        if u'mode' in options:
            mode_opt = options[u'mode']
            if type(mode_opt) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode_opt)))
            if mode_opt not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode_opt))
            mode = mode_opt

        return Interrupt(request, mode=mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Wire formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args=None, kwargs=None, progress=None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        progress = None
        if u'progress' in options:
            progress_opt = options[u'progress']
            if type(progress_opt) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress_opt)))
            progress = progress_opt

        return Yield(request, args=args, kwargs=kwargs, progress=progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## args/kwargs are trailing optional elements; kwargs requires args slot
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
## ---- extraction residue (stray docstring fragment, commented out to keep the file parseable) ----
## parse:
##   Verifies and parses an unserialized raw message into an actual WAMP message instance.
##   :param wmsg: The unserialized raw message.
##   :type wmsg: list
##   :returns: An instance of this class.
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Precompiled URI validation patterns. A WAMP URI is dot-separated; "strict"
## restricts components to lowercase [0-9a-z_] of length >= 2, while "loose"
## only excludes whitespace, '.' and '#' inside components. The *_NON_EMPTY
## variants additionally disallow empty ("..") components, used for e.g.
## procedure URIs that may not contain wildcards.
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check a value for being a valid WAMP URI; raise :class:`ProtocolError`
    (prefixed with ``message``) otherwise.

    :returns: The validated value, unchanged.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check a value for being a valid WAMP ID; raise :class:`ProtocolError`
    (prefixed with ``message``) otherwise.

    :returns: The validated value, unchanged.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs live in [0, 2**53] so they survive IEEE-754 doubles (JSON)
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check a value for being a valid WAMP extra (details/options) dictionary
    with text keys; raise :class:`ProtocolError` (prefixed with ``message``)
    otherwise.

    :returns: The validated value, unchanged.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for k in value:
        if type(k) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations (e.g. after the message was mutated)
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and memoize per serializer
        if serializer not in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message, sent by a client to open a session.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        roles = []
        # 'roles' is mandatory and must be a non-empty dict mapping
        # role name -> role details
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            # only role names with a known feature class are accepted
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        # 'authmethods' is optional: a list of unicode method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        # 'authid' is optional: a unicode authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                # only announce public, non-None feature flags
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    # lazily create the 'features' sub-dict on first flag
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message, sent by a router to accept a session.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
        # all auth* details are optional
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)
        roles = []
        # 'roles' is mandatory and must be a non-empty dict mapping
        # role name -> role details
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        # FIX: this previously tested `self.authrole`, which dropped the
        # 'authmethod' detail whenever authrole was unset, and emitted
        # authmethod = None whenever authrole was set but authmethod was not.
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                # only announce public, non-None feature flags
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message, sent to refuse session establishment.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable message
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(message)))

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message, sent by a router during authentication.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize a missing extra dict to an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        auth_method = wmsg[1]
        if type(auth_method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(auth_method)))

        auth_extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(auth_method, auth_extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message, answering a ``CHALLENGE``.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra dict to an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        sig = wmsg[1]
        if type(sig) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(sig)))

        sig_extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(sig, sig_extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message, closing an established session.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        This replaces a "MASKED" placeholder; the implementation mirrors
        :meth:`Abort.parse`, since GOODBYE shares the ABORT wire format
        ``[TYPE, Details|dict, Reason|uri]``.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
        # optional human-readable closing message
        message = None
        if u'message' in details:
            details_message = details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
            message = details_message
        obj = Goodbye(reason, message)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        ## incoming counter: any non-negative integer
        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0:  # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        ## outgoing counter: strictly positive integer
        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0:  # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        ## optional discard payload
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the discard element is only present when set
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
            Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## only these request message types may produce an ERROR
        allowed_request_types = [Subscribe.MESSAGE_TYPE,
                                 Unsubscribe.MESSAGE_TYPE,
                                 Publish.MESSAGE_TYPE,
                                 Register.MESSAGE_TYPE,
                                 Unregister.MESSAGE_TYPE,
                                 Call.MESSAGE_TYPE,
                                 Invocation.MESSAGE_TYPE]
        if request_type not in allowed_request_types:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated but currently unused
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        return Error(request_type, request, error, args = args, kwargs = kwargs)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        ## args must be present whenever kwargs are, per the wire format
        msg = [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
            be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
            Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
            error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
            if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
            to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
        # optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
        # optional keyword payload (requires args to also be present on the wire)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
        # all publish options are optional; each is validated individually
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None
        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge
        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe
        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            # every excluded session ID must be an integer
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude
        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            # every eligible session ID must be an integer
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options explicitly set (not None) are put on the wire
        options = {}
        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        # args must be present whenever kwargs are, per the wire format
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a ``PUBLISH``.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
        req = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
        pub = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")
        return Published(req, pub)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## 'match' option is optional and defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            option_match = options[u'match']
            if type(option_match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
            if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
            match = option_match

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the 'match' option is only sent for non-default policies
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options = {u'match': self.match}
        else:
            options = {}
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE``.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
        req = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
        sub = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")
        return Subscribed(req, sub)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
        req = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        sub = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")
        return Unsubscribe(req, sub)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging a prior ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        wire = [Unsubscribed.MESSAGE_TYPE, self.request]
        return wire

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        tmpl = "WAMP UNSUBSCRIBED Message (request = {0})"
        return tmpl.format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- for any structural violation of the EVENT formats.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        # 4 = bare event, 5 = with positional args, 6 = with args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        # optional disclosure of the publishing session, carried in details
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        # NOTE: payload elements are only appended when non-empty, so the
        # shortest valid wire form is always produced.
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- for any structural or option violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        # 4 = bare call, 5 = with positional args, 6 = with args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        # 'timeout' option: non-negative integer number of milliseconds
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))

            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))

            timeout = option_timeout

        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))

            receive_progress = option_receive_progress

        # note: wire option key is 'disclose_me', attribute is 'discloseMe'
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        # shortest valid wire form: payload elements only when non-empty
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## cancelation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- for structural violations or an unknown 'mode' value.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None

        if u'mode' in options:

            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))

            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))

            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: format string previously contained a stray quote
        ## ("mode = '{1}''") which rendered e.g. "mode = 'kill'')" -- now
        ## consistent with Interrupt.__str__
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- for any structural violation of the RESULT formats.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        # 3 = bare result, 4 = with positional args, 5 = with args + kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        # 'progress' detail marks this as an intermediate (progressive) result
        progress = None

        if u'progress' in details:

            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))

            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.progress is not None:
            details[u'progress'] = self.progress

        # shortest valid wire form: payload elements only when non-empty
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- for any structural or option violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        # 'pkeys' option: must be a list of integers if present
        if u'pkeys' in options:

            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))

            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

            pkeys = option_pkeys

        # note: wire option key is 'disclose_caller', attribute is 'discloseCaller'
        if u'disclose_caller' in options:

            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))

            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys

        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, acknowledging a prior ``REGISTER``.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        # both fields are WAMP IDs, i.e. (long) integers
        for ident in (request, registration):
            assert(type(ident) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        wire = [Registered.MESSAGE_TYPE, self.request, self.registration]
        return wire

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        tmpl = "WAMP REGISTERED Message (request = {0}, registration = {1})"
        return tmpl.format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message, sent by a callee to remove a
    previously established registration.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        # both fields are WAMP IDs, i.e. (long) integers
        for ident in (request, registration):
            assert(type(ident) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual
        WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        wire = [Unregister.MESSAGE_TYPE, self.request, self.registration]
        return wire

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        tmpl = "WAMP UNREGISTER Message (request = {0}, registration = {1})"
        return tmpl.format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- if the raw message does not conform to the
           ``[UNREGISTERED, UNREGISTER.Request|id]`` format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## fixed: diagnostics previously said "UNREGISTER" although this
            ## is the UNREGISTERED message (consistent with e.g. Unsubscribed)
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously rendered as "WAMP UNREGISTER Message"
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- for any structural or detail violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        # 4 = bare invocation, 5 = with positional args, 6 = with args + kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        # 'timeout' detail: non-negative integer number of milliseconds
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))

            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))

            timeout = detail_timeout

        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))

            receive_progress = detail_receive_progress

        # optional caller disclosure details (session ID + auth information)
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))

            caller = detail_caller

        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))

            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))

            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))

            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.caller is not None:
            options[u'caller'] = self.caller

        if self.authid is not None:
            options[u'authid'] = self.authid

        if self.authrole is not None:
            options[u'authrole'] = self.authrole

        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        # shortest valid wire form: payload elements only when non-empty
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## interruption modes (note: unlike Cancel, there is no 'skip' mode here)
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises: ProtocolError -- for structural violations or an unknown 'mode' value.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## options
        ##
        mode = None

        if u'mode' in options:

            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))

            if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))

            mode = option_mode

        obj = Interrupt(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional payload
        pos_args = None
        if len(wmsg) > 3:
            pos_args = wmsg[3]
            if type(pos_args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(pos_args)))

        ## optional keyword payload
        kw_args = None
        if len(wmsg) > 4:
            kw_args = wmsg[4]
            if type(kw_args) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kw_args)))

        ## optional 'progress' option
        progress = None
        if u'progress' in options:
            flag = options[u'progress']
            if type(flag) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(flag)))
            progress = flag

        return Yield(request, args = pos_args, kwargs = kw_args, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## payload is only appended when present; kwargs forces args slot too
        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
message = details_message
obj = Goodbye(reason, message)
return obj
| 685
| 717
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Pre-compiled URI validation patterns. "Strict" patterns only allow
## lowercase alphanumeric/underscore components of length >= 2; "loose"
## patterns allow any characters except whitespace, '.' and '#'.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check that *value* is a text-typed WAMP URI matching the loose URI
    pattern, raising :class:`ProtocolError` otherwise.

    :param value: The candidate URI.
    :param message: Context string prefixed to any error raised.

    :returns: *value* unchanged, when valid.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Check that *value* is a valid WAMP ID (an integer in ``[0, 2**53]``),
    raising :class:`ProtocolError` otherwise.

    :param value: The candidate ID.
    :param message: Context string prefixed to any error raised.

    :returns: *value* unchanged, when valid.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    if not (0 <= value <= 9007199254740992):  # 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Check that *value* is a dict with text-typed keys, raising
    :class:`ProtocolError` otherwise.

    :param value: The candidate details/options dict.
    :param message: Context string prefixed to any error raised.

    :returns: *value* unchanged, when valid.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value.keys():
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: maps ISerializer instance -> serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serve from cache when possible; serialize (and cache) otherwise
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``

    Carries the realm to join plus a ``details`` dict with the announced
    roles/features and optional ``authmethods``/``authid``.
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## 'roles' is mandatory and must be a non-empty dict mapping role
        ## name -> role details (with optional 'features' sub-dict)
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            ## instantiate the matching RoleFeatures subclass, passing any
            ## announced features as keyword arguments
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        ## optional 'authmethods': list of text method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        ## optional 'authid': text
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce every non-private, non-None feature attribute of the role;
            ## the 'features' sub-dict is created lazily on first such attribute
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## optional authentication details
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        ## 'roles' is mandatory and must be a non-empty dict mapping role
        ## name -> role details (with optional 'features' sub-dict)
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## NOTE: use the text key u'roles' consistently (was the bytes
        ## literal 'roles' here, unlike everywhere else in this module)
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            ## instantiate the matching RoleFeatures subclass, passing any
            ## announced features as keyword arguments
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## FIX: this was guarded by "if self.authrole:" (copy-paste bug),
        ## which dropped 'authmethod' whenever authrole was unset and
        ## marshalled None whenever authrole was set without authmethod
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        ## announce every non-private, non-None feature attribute of each role;
        ## the 'features' sub-dict is created lazily on first such attribute
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            raw_message = details[u'message']
            if type(raw_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(raw_message)))
            message = raw_message

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable 'message' detail
        message = None
        if u'message' in details:
            raw_message = details[u'message']
            if type(raw_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(raw_message)))
            message = raw_message

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        ## 'incoming' must be a non-negative integer
        rx = wmsg[1]
        if type(rx) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(rx)))
        if rx < 0:
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(rx))

        ## 'outgoing' must be a positive integer
        tx = wmsg[2]
        if type(tx) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(tx)))
        if tx <= 0:
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(tx))

        ## optional discard payload
        payload = None
        if len(wmsg) > 3:
            payload = wmsg[3]
            if type(payload) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(payload)))

        return Heartbeat(rx, tx, discard = payload)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        discard_len = len(self.discard) if self.discard else None
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, discard_len)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: on any structural or type violation.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            ## FIX: interpolate the *type* of the offending value (the message
            ## says "invalid type"); previously the raw value was formatted,
            ## unlike every other type-error message in this module
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(type(request_type)))

        ## an ERROR may only refer to one of the request message types
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated for well-formedness but not otherwise used
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        ## optional positional payload
        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
"""
A WAMP ``PUBLISH`` message.
Formats:
* ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 16
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
topic,
args = None,
kwargs = None,
acknowledge = None,
excludeMe = None,
exclude = None,
eligible = None,
discloseMe = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param topic: The WAMP or application URI of the PubSub topic the event should
be published to.
:type topic: unicode
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param acknowledge: If True, acknowledge the publication with a success or
error response.
:type acknowledge: bool or None
:param excludeMe: If ``True``, exclude the publisher from receiving the event, even
if he is subscribed (and eligible).
:type excludeMe: bool or None
:param exclude: List of WAMP session IDs to exclude from receiving this event.
:type exclude: list of int or None
:param eligible: List of WAMP session IDs eligible to receive this event.
:type eligible: list of int or None
:param discloseMe: If True, request to disclose the publisher of this event
to subscribers.
:type discloseMe: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(acknowledge is None or type(acknowledge) == bool)
assert(excludeMe is None or type(excludeMe) == bool)
assert(exclude is None or type(exclude) == list)
assert(eligible is None or type(eligible) == list)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.topic = topic
self.args = args
self.kwargs = kwargs
self.acknowledge = acknowledge
self.excludeMe = excludeMe
self.exclude = exclude
self.eligible = eligible
self.discloseMe = discloseMe
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
        ## PUBLISH has 4 fixed fields plus optional args and kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
        ## optional keyword payload (only present when args is too)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
        ## absent options are represented as None (not False / empty list)
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None
        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge
        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe
        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            ## every entry must be an integer (WAMP session ID)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude
        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            ## every entry must be an integer (WAMP session ID)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)
        return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.acknowledge is not None:
options[u'acknowledge'] = self.acknowledge
if self.excludeMe is not None:
options[u'exclude_me'] = self.excludeMe
if self.exclude is not None:
options[u'exclude'] = self.exclude
if self.eligible is not None:
options[u'eligible'] = self.eligible
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
elif self.args:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
else:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
        return Published(check_or_raise_id(wmsg[1], "'request' in PUBLISHED"),
                         check_or_raise_id(wmsg[2], "'publication' in PUBLISHED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## PUBLISHED carries no options/details dict -- always 3 elements
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
        ## the matching policy defaults to exact matching when absent
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match = options[u'match']
            if type(match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match)))
            if match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match))
        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## exact matching is the protocol default, so it is not sent explicitly
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options = {u'match': self.match}
        else:
            options = {}
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)
        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)
        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
        ## EVENT has 4 fixed fields plus optional payload args/kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
        ## optional disclosure of the publishing session
        publisher = None
        if u'publisher' in details:
            publisher = details[u'publisher']
            if type(publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(publisher)))
        return Event(subscription, publication, args = args, kwargs = kwargs, publisher = publisher)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher
        msg = [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
        if self.kwargs:
            ## kwargs present: the args slot must be filled as well
            msg.append(self.args)
            msg.append(self.kwargs)
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## type-check all arguments up front so malformed calls fail fast
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
        ## CALL has 4 fixed fields plus optional args and kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
        ## 'timeout' option: must be a non-negative integer
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe
        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options that were explicitly set (non-None) go on the wire
        options = {}
        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe
        ## NOTE(review): if kwargs is set while args is None, the marshaled
        ## message carries None in the args slot, which parse() would reject --
        ## presumably callers always supply args alongside kwargs; confirm.
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## call cancellation modes
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
        ## 'mode' option: must be one of the three defined cancellation modes
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode
        obj = Cancel(request, mode = mode)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: the format string previously ended with a stray extra
        ## quote ("mode = '{1}'')"), producing e.g. "mode = 'kill'')"
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
        ## RESULT has 3 fixed fields plus optional args and kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
        ## 'progress' detail flags a progressive (non-final) call result
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress
        obj = Result(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress
        ## payload variants: kwargs implies the args slot is present too
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        ## every partition key must itself be an integer
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)
        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
        ## absent options are represented as None
        pkeys = None
        discloseCaller = None
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            ## every entry must be an integer partition key
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller
        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options that were explicitly set (non-None) go on the wire
        options = {}
        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises ProtocolError: if the message is structurally invalid.
        """
        ## the message type was already checked by the serializer
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## fixed: error text previously said "UNREGISTER" though this parses UNREGISTERED
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        return Unregistered(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously mislabeled as "UNREGISTER" in the log representation
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, the callee should automatically cancel
           the invocation after this many ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: For invalid message length, field types or detail values.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        ## three valid lengths, depending on presence of args / args+kwargs
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        ## optional keyword payload (only valid when args is also present)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## each known detail is individually type- (and where applicable value-) checked;
        ## unknown details are silently ignored
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout

        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress

        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller

        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include details that are actually set
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.caller is not None:
            options[u'caller'] = self.caller

        if self.authid is not None:
            options[u'authid'] = self.authid

        if self.authrole is not None:
            options[u'authrole'] = self.authrole

        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## NOTE: empty args/kwargs are treated as absent (truthiness check),
        ## which selects the shortest valid wire format
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two interruption modes defined by WAMP
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type code is guaranteed by the serializer layer
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request_id = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional 'mode' option: must be one of the two known mode strings
        ##
        interrupt_mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))
            if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))
            interrupt_mode = option_mode

        return Interrupt(request_id, mode = interrupt_mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## 'mode' is emitted only when set
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message type was verified upstream by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        ## three wire formats: bare, with args, with args + kwargs
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request_id = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        payload_args = None
        if len(wmsg) > 3:
            payload_args = wmsg[3]
            if type(payload_args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(payload_args)))

        payload_kwargs = None
        if len(wmsg) > 4:
            payload_kwargs = wmsg[4]
            if type(payload_kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(payload_kwargs)))

        ## optional 'progress' option flags a progressive (non-final) result
        ##
        progress_flag = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress_flag = option_progress

        return Yield(request_id, args = payload_args, kwargs = payload_kwargs, progress = progress_flag)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## empty payloads collapse to the shortest wire format
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
parse
|
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check (lowercase alphanumerics/underscore, components >= 2 chars) allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check (any chars except whitespace, '.' and '#') allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate `value` as a WAMP URI (loose rules) and return it unchanged.

    :raises ProtocolError: If `value` is not a text string or does not match
       the loose URI pattern; `message` is used as the error-context prefix.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Validate `value` as a WAMP ID (integer in [0, 2**53]) and return it unchanged.

    :raises ProtocolError: If `value` is not an integer or out of range;
       `message` is used as the error-context prefix.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs are confined to the range representable exactly in IEEE doubles
    if not (0 <= value <= 9007199254740992):  # 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate `value` as a WAMP "extra" dict (all keys must be text) and return it unchanged.

    :raises ProtocolError: If `value` is not a dict or any key is not text;
       `message` is used as the error-context prefix.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations (call after mutating message attributes)
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## only serialize if not cached ..
        if not serializer in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]
IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: For invalid message structure, missing/empty roles,
           or invalid authentication details.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## 'roles' is a mandatory detail: map each announced role name to its
        ## role-features class, instantiating with the announced features (if any)
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()

            roles.append(role_features)

        ## optional 'authmethods' detail: list of text method names
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        ## optional 'authid' detail: text
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))

            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## reflect over the role's public attributes; only set features are emitted
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: For invalid message structure or missing/empty roles.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## NOTE(review): these auth details are not type-checked here (unlike in
        ## Hello.parse) — presumably accepted as-is from a trusted router; confirm
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## consistency: use a unicode key literal like everywhere else in this module
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }

        if self.authid:
            details[u'authid'] = self.authid

        if self.authrole:
            details[u'authrole'] = self.authrole

        ## fixed: was guarded by `if self.authrole:` (copy-paste bug), which
        ## dropped 'authmethod' whenever authrole was unset and emitted
        ## 'authmethod': None when authrole was set but authmethod was not
        if self.authmethod:
            details[u'authmethod'] = self.authmethod

        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the serializer layer has already checked the message type code
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        abort_details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        abort_reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable 'message' detail
        ##
        abort_message = None
        if u'message' in abort_details:
            details_message = abort_details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
            abort_message = details_message

        return Abort(abort_reason, abort_message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message

        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize a missing/empty extra dict to a fresh empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the serializer layer guarantees the message type code here
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        auth_method = wmsg[1]
        if type(auth_method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(auth_method)))

        auth_extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(auth_method, auth_extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize a missing/empty extra dict to a fresh empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the serializer layer guarantees the message type code here
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        auth_signature = wmsg[1]
        if type(auth_signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(auth_signature)))

        auth_extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(auth_signature, auth_extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message type code was checked by the serializer layer already
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        close_details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        close_reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable 'message' detail
        ##
        close_message = None
        if u'message' in close_details:
            details_message = close_details[u'message']
            if type(details_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
            close_message = details_message

        return Goodbye(close_reason, close_message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.message:
            details[u'message'] = self.message

        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        ## programming-error guards only (stripped under "python -O");
        ## wire-level validation happens in parse()
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        # incoming/outgoing heartbeat sequence numbers; discard is optional
        # padding data the peer will throw away (see the two wire formats above)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard
# MASKED: parse function (lines 775-816)
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
if self.discard:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
else:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message, reported in reply to a failed request.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        ## programming-error guards only; wire-level validation is in parse()
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## ERROR may only refer back to one of the requesting message types
        valid_request_types = [Subscribe.MESSAGE_TYPE,
                               Unsubscribe.MESSAGE_TYPE,
                               Publish.MESSAGE_TYPE,
                               Register.MESSAGE_TYPE,
                               Unregister.MESSAGE_TYPE,
                               Call.MESSAGE_TYPE,
                               Invocation.MESSAGE_TYPE]
        if request_type not in valid_request_types:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated for shape but otherwise ignored here
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        return Error(request_type, request, error, args = args, kwargs = kwargs)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        ## args must be present whenever kwargs are, hence the ordering
        msg = [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        ## programming-error guards only (stripped under "python -O");
        ## wire-level validation happens in parse()
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        # the remaining attributes are publish options; None means "not set"
        # and is omitted from the wire format by marshal()
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: When the raw message does not conform to any of
           the three PUBLISH wire formats above.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        # optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        # optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        # each option is optional and independently type-checked; absent
        # options stay None so marshal() can round-trip without inventing them
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            # every entry must be a WAMP session ID (an integer)
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            # every entry must be a WAMP session ID (an integer)
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options that were explicitly set are put on the wire
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        # choose the shortest wire format that carries the payload;
        # args must be present whenever kwargs are
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a ``PUBLISH``.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        ## programming-error guards only; wire-level validation is in parse()
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
        publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")

        return Published(request, publication)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        ## programming-error guards only; wire-level validation is in parse()
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## default to exact matching unless a valid 'match' option overrides it
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            candidate = options[u'match']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(candidate)))
            if candidate not in (Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD):
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(candidate))
            match = candidate

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        ## only non-default matching policies are put on the wire
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE``.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        ## programming-error guards only; wire-level validation is in parse()
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")

        return Subscribed(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        ## programming-error guards only; wire-level validation is in parse()
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        return Unsubscribe(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE``.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        ## programming-error guard only; wire-level validation is in parse()
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")

        return Unsubscribed(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        ## programming-error guards only (stripped under "python -O");
        ## wire-level validation happens in parse()
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        # optional; only known when the publisher asked to be disclosed
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: When the raw message does not conform to any of
           the three EVENT wire formats above.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        # optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        # optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        # the publisher session ID is an optional detail
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        # choose the shortest wire format that carries the payload;
        # args must be present whenever kwargs are
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        ## programming-error guards only (stripped under "python -O");
        ## wire-level validation happens in parse()
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        # call options; None means "not set" and is omitted by marshal()
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: When the raw message does not conform to any of
           the three CALL wire formats above.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        # optional positional call arguments
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        # optional keyword call arguments
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        # 'timeout' must be a non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))

            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))

            timeout = option_timeout

        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))

            receive_progress = option_receive_progress

        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        # only options that were explicitly set are put on the wire
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        # choose the shortest wire format that carries the arguments;
        # args must be present whenever kwargs are
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message, sent by a caller to cancel a pending call.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## The cancellation modes defined by the WAMP advanced profile.
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        ## programming-error guards only; wire-level validation is in parse()
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: When the raw message does not conform to the
           CANCEL wire format or carries an unknown 'mode' option.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## 'mode' is optional but, if present, must be one of the known modes
        ##
        mode = None

        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))

            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))

            mode = option_mode

        return Cancel(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: dropped the stray trailing apostrophe the original format
        ## string emitted after the mode value ("... mode = 'x'')").
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message, carrying a (possibly progressive) call result.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        ## programming-error guards only; wire-level validation is in parse()
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message code was already checked by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## the 'progress' flag is an optional detail
        progress = None
        if u'progress' in details:
            candidate = details[u'progress']
            if type(candidate) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(candidate)))
            progress = candidate

        return Result(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## args must be present whenever kwargs are, hence the ordering
        msg = [Result.MESSAGE_TYPE, self.request, details]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            ## every partition key must be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: if the raw message does not conform to the REGISTER format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## optional 'pkeys' option: list of integer partition keys
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys

        ## optional 'disclose_caller' option: boolean flag
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys

        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        ## both payload elements are WAMP IDs
        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        ## both payload elements are WAMP IDs
        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: if the raw message does not conform to the UNREGISTERED format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## fixed: error messages previously said "UNREGISTER" (copy-paste
            ## from the Unregister class) although this parses UNREGISTERED
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously printed "WAMP UNREGISTER Message" for this
        ## UNREGISTERED message type
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: if the raw message does not conform to the INVOCATION format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        ## 4, 5 or 6 elements, matching the three wire formats documented on the class
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## optional 'timeout' detail: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))

            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))

            timeout = detail_timeout

        ## optional 'receive_progress' detail: boolean flag
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress

        ## optional 'caller' detail: WAMP session ID (integer)
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller

        ## optional caller authentication details: all must be text if present
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout

        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress

        if self.caller is not None:
            options[u'caller'] = self.caller

        if self.authid is not None:
            options[u'authid'] = self.authid

        if self.authrole is not None:
            options[u'authrole'] = self.authrole

        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## note: falsy (empty) args/kwargs are serialized like absent ones,
        ## choosing the shortest of the three wire formats
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## allowed values for the 'mode' option
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## the message type code was already checked by WampSerializer.unserialize
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional 'mode' option: must be one of the allowed text values
        mode = None
        if u'mode' in options:
            mode = options[u'mode']

            if type(mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode)))

            if mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode))

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only transmit the 'mode' option when it was actually set
        opts = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, opts]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: if the raw message does not conform to the YIELD format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        ## 3, 4 or 5 elements, matching the three wire formats documented on the class
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        ## optional 'progress' flag (progressive invocation results)
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress

        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        ## note: falsy (empty) args/kwargs are serialized like absent ones,
        ## choosing the shortest of the three wire formats
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.

    :param wmsg: The unserialized raw message.
    :type wmsg: list

    :returns: An instance of this class.

    :raises ProtocolError: if the raw message does not conform to the HEARTBEAT format.
    """
    ## this should already be verified by WampSerializer.unserialize
    ##
    assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

    if len(wmsg) not in [3, 4]:
        raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

    ## second element: non-negative integer (named 'incoming')
    incoming = wmsg[1]

    if type(incoming) not in six.integer_types:
        raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))

    if incoming < 0: # must be non-negative
        raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

    ## third element: strictly positive integer (named 'outgoing')
    outgoing = wmsg[2]

    if type(outgoing) not in six.integer_types:
        raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))

    if outgoing <= 0: # must be positive
        raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

    ## optional fourth element: text payload that is discarded
    ## (presumably padding/keepalive data -- confirm against Heartbeat class docs)
    discard = None
    if len(wmsg) > 3:
        discard = wmsg[3]
        if type(discard) != six.text_type:
            raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

    obj = Heartbeat(incoming, outgoing, discard = discard)

    return obj
| 775
| 816
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## URI validation patterns: "strict" restricts components to [0-9a-z_]
## (min length 2), "loose" only excludes whitespace, '.' and '#'.
## The *_NON_EMPTY variants additionally disallow empty URI components.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Check a value for being a valid (loose) WAMP URI.

    :param value: The value to check.
    :param message: Context string prefixed to any raised error message.

    :returns: The checked value, unchanged.

    :raises ProtocolError: if the value is not text or not a valid URI.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))

    ## validate against the loose pattern (empty components allowed)
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))

    return value
def check_or_raise_id(value, message):
    """
    Check a value for being a valid WAMP ID.

    :param value: The value to check.
    :param message: Context string prefixed to any raised error message.

    :returns: The checked value, unchanged.

    :raises ProtocolError: if the value is not an integer in [0, 2**53].
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))

    ## WAMP IDs must lie in [0, 2**53] (exactly representable in IEEE doubles)
    if not (0 <= value <= 9007199254740992):  # 2**53
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))

    return value
def check_or_raise_extra(value, message):
    """
    Check a value for being a valid WAMP extra (options/details) dictionary.

    :param value: The value to check.
    :param message: Context string prefixed to any raised error message.

    :returns: The checked value, unchanged.

    :raises ProtocolError: if the value is not a dict or any key is not text.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))

    ## all keys must be text
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))

    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances to serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and memoize per serializer
        cached = self._serialized.get(serializer)
        if cached is None:
            cached = serializer.serialize(self.marshal())
            self._serialized[serializer] = cached
        return cached


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.

        :raises ProtocolError: if the raw message does not conform to the HELLO format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        ## mandatory 'roles' detail: maps role names to per-role feature dicts
        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                ## (an unknown feature key will currently raise TypeError here,
                ## since it is passed as an unexpected keyword argument)
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        ## optional 'authmethods' detail: list of text values
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        ## optional 'authid' detail: text value
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## export every non-None public feature attribute of the role object
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    ## create the 'features' sub-dict lazily, on the first exported feature
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
"""
A WAMP ``WELCOME`` message.
Format: ``[WELCOME, Session|id, Details|dict]``
"""
MESSAGE_TYPE = 2
"""
The WAMP message code for this type of message.
"""
def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
    """
    :param session: The WAMP session ID the other peer is assigned.
    :type session: int
    :param roles: The WAMP roles to announce.
    :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
    :param authid: The authentication ID assigned.
    :type authid: unicode or None
    :param authrole: The authentication role assigned.
    :type authrole: unicode or None
    :param authmethod: The authentication method in use.
    :type authmethod: unicode or None
    :param authprovider: The authentication provider in use.
    :type authprovider: unicode or None
    """
    assert(type(session) in six.integer_types)
    assert(type(roles) == list)
    for role in roles:
        assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
    assert(authid is None or type(authid) == six.text_type)
    assert(authrole is None or type(authrole) == six.text_type)
    assert(authmethod is None or type(authmethod) == six.text_type)
    assert(authprovider is None or type(authprovider) == six.text_type)

    Message.__init__(self)
    self.session = session
    self.roles = roles
    self.authid = authid
    self.authrole = authrole
    self.authmethod = authmethod
    self.authprovider = authprovider
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.

    :param wmsg: The unserialized raw message.
    :type wmsg: list

    :returns: An instance of this class.

    :raises ProtocolError: if the raw message does not conform to the WELCOME format.
    """
    ## this should already be verified by WampSerializer.unserialize
    ##
    assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

    if len(wmsg) != 3:
        raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

    session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
    details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

    ## NOTE(review): unlike HELLO parsing, the auth* details are taken as-is
    ## without type validation -- confirm whether that is intentional
    authid = details.get(u'authid', None)
    authrole = details.get(u'authrole', None)
    authmethod = details.get(u'authmethod', None)
    authprovider = details.get(u'authprovider', None)

    ## mandatory 'roles' detail: maps role names to per-role feature dicts
    roles = []

    if not u'roles' in details:
        raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

    details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")

    if len(details_roles) == 0:
        raise ProtocolError("empty 'roles' in 'details' in WELCOME")

    for role in details_roles:
        if role not in ROLE_NAME_TO_CLASS:
            raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

        role_cls = ROLE_NAME_TO_CLASS[role]

        if u'features' in details_roles[role]:
            check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            ## FIXME: skip unknown attributes
            ## (an unknown feature key will currently raise TypeError here,
            ## since it is passed as an unexpected keyword argument)
            role_features = role_cls(**details_roles[role][u'features'])

        else:
            role_features = role_cls()

        roles.append(role_features)

    obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

    return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {
u'roles': {}
}
if self.authid:
details[u'authid'] = self.authid
if self.authrole:
details[u'authrole'] = self.authrole
if self.authrole:
details[u'authmethod'] = self.authmethod
if self.authprovider:
details[u'authprovider'] = self.authprovider
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
return [Welcome.MESSAGE_TYPE, self.session, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message, terminating session establishment.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert type(reason) == six.text_type
        assert message is None or type(message) == six.text_type

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        message = None
        if u'message' in details:
            msg = details[u'message']
            if type(msg) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(msg)))
            message = msg

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the optional human-readable message travels inside 'details'
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP ABORT Message (message = {0}, reason = {1})"
        return template.format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message, carrying an authentication challenge.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert type(method) == six.text_type
        assert extra is None or type(extra) == dict

        Message.__init__(self)
        self.method = method
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP CHALLENGE Message (method = {0}, extra = {1})"
        return template.format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message, answering an authentication challenge.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert type(signature) == six.text_type
        assert extra is None or type(extra) == dict

        Message.__init__(self)
        self.signature = signature
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})"
        return template.format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message, closing an established session.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert type(reason) == six.text_type
        assert message is None or type(message) == six.text_type

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        message = None
        if u'message' in details:
            msg = details[u'message']
            if type(msg) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(msg)))
            message = msg

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the optional human-readable message travels inside 'details'
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP GOODBYE Message (message = {0}, reason = {1})"
        return template.format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert type(incoming) in six.integer_types
        assert type(outgoing) in six.integer_types
        assert discard is None or type(discard) == six.text_type

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        ## incoming counter must be non-negative
        if incoming < 0:
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        ## outgoing counter must be strictly positive
        if outgoing <= 0:
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        return Heartbeat(incoming, outgoing, discard = discard)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
        if self.discard:
            msg.append(self.discard)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        discard_len = len(self.discard) if self.discard else None
        template = "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})"
        return template.format(self.incoming, self.outgoing, discard_len)
class Error(Message):
    """
    A WAMP ``ERROR`` message, reporting failure of a prior request.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        assert type(request_type) in six.integer_types
        assert type(request) in six.integer_types
        assert type(error) == six.text_type
        assert args is None or type(args) in [list, tuple]
        assert kwargs is None or type(kwargs) == dict

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## only request-style messages can be answered with an ERROR
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")
        ## details are validated but currently carry no parsed attributes
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        return Error(request_type, request, error, args = args, kwargs = kwargs)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        msg = [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})"
        return template.format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        ## 4, 5 or 6 elements depending on presence of args / kwargs payload
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## all publication options are optional and default to None
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            ## each entry must be a WAMP session ID
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            ## each entry must be a WAMP session ID
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options that were explicitly set are serialized
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## kwargs payload forces args to be present too (may be None)
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message, acknowledging a publication.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert type(request) in six.integer_types
        assert type(publication) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
        publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")

        return Published(request, publication)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP PUBLISHED Message (request = {0}, publication = {1})"
        return template.format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## supported topic matching policies
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert type(request) in six.integer_types
        assert type(topic) == six.text_type
        assert match is None or type(match) == six.text_type
        assert match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD]

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## matching policy defaults to exact matching
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match_opt = options[u'match']
            if type(match_opt) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match_opt)))
            if match_opt not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match_opt))
            match = match_opt

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## exact matching is the protocol default and is not serialized
        options = {}
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match

        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})"
        return template.format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a subscription.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")

        return Subscribed(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})"
        return template.format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert type(request) in six.integer_types
        assert type(subscription) in six.integer_types

        Message.__init__(self)
        self.request = request
        self.subscription = subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
        subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")

        return Unsubscribe(request, subscription)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})"
        return template.format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an unsubscription.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert type(request) in six.integer_types

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## message code was already checked by WampSerializer.unserialize
        assert len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")

        return Unsubscribed(request)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
"""
A WAMP ``EVENT`` message.
Formats:
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 36
"""
The WAMP message code for this type of message.
"""
def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
    """
    :param subscription: The subscription ID this event is dispatched under.
    :type subscription: int
    :param publication: The publication ID of the dispatched event.
    :type publication: int
    :param args: Positional values for application-defined exception.
       Must be serializable using any serializers in use.
    :type args: list or tuple or None
    :param kwargs: Keyword values for application-defined exception.
       Must be serializable using any serializers in use.
    :type kwargs: dict or None
    :param publisher: If present, the WAMP session ID of the publisher of this event.
    :type publisher: int or None
    """
    assert type(subscription) in six.integer_types
    assert type(publication) in six.integer_types
    assert args is None or type(args) in [list, tuple]
    assert kwargs is None or type(kwargs) == dict
    assert publisher is None or type(publisher) in six.integer_types

    Message.__init__(self)
    self.subscription = subscription
    self.publication = publication
    self.args = args
    self.kwargs = kwargs
    self.publisher = publisher
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
publisher = None
if u'publisher' in details:
detail_publisher = details[u'publisher']
if type(detail_publisher) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
publisher = detail_publisher
obj = Event(subscription,
publication,
args = args,
kwargs = kwargs,
publisher = publisher)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.publisher is not None:
details[u'publisher'] = self.publisher
if self.kwargs:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
elif self.args:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
else:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the CALL wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            ## wire format carries positional arguments as a list only
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## optional 'timeout' option: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))

            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))

            timeout = option_timeout

        ## optional 'receive_progress' option: bool
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))

            receive_progress = option_receive_progress

        ## optional 'disclose_me' option: bool
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))

            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## note: empty args ([]) / kwargs ({}) are falsy and get dropped from
        ## the wire format just like None
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## permissible values for the 'mode' option
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the CANCEL wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        mode = None

        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))

            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))

            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.mode is not None:
            options[u'mode'] = self.mode

        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: the original template had a stray extra quote after '{1}'
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the RESULT wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            ## wire format carries positional results as a list only
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## optional 'progress' detail: bool
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                ## NOTE(review): this error text says "option" although the value
                ## comes from the RESULT 'details' dict — wording inconsistency only
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))

            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}

        if self.progress is not None:
            details[u'progress'] = self.progress

        ## note: empty args ([]) / kwargs ({}) are falsy and get dropped from
        ## the wire format just like None
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the REGISTER wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## optional 'pkeys' option: list of integer partition keys
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))

            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))

            pkeys = option_pkeys

        ## optional 'disclose_caller' option: bool
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))

            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the serializer layer has already checked the message type code
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        ## validate both IDs (in wire order), then build the instance
        parsed_request = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
        parsed_registration = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")
        return Registered(parsed_request, parsed_registration)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        msg = [Registered.MESSAGE_TYPE]
        msg.append(self.request)
        msg.append(self.registration)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP REGISTERED Message (request = {0}, registration = {1})"
        return template.format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the UNREGISTER wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            ## fixed: error text previously said "for WAMP UNREGISTER",
            ## inconsistent with every other message class ("for <NAME>")
            raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
        registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")

        obj = Unregister(request, registration)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the UNREGISTERED wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## fixed: error text previously said "UNREGISTER" (copy-paste from
            ## the Unregister class) — this is the UNREGISTERED message
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## fixed: previously rendered as "WAMP UNREGISTER Message"
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the INVOCATION wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            ## wire format carries positional arguments as a list only
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        ## optional 'timeout' detail: non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))

            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))

            timeout = detail_timeout

        ## optional 'receive_progress' detail: bool
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))

            receive_progress = detail_receive_progress

        ## optional 'caller' detail: WAMP session ID of the caller
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))

            caller = detail_caller

        ## optional caller identity details: authid / authrole / authmethod
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))

            authid = detail_authid

        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))

            authrole = detail_authrole

        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))

            authmethod = detail_authmethod

        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.caller is not None:
            options[u'caller'] = self.caller
        if self.authid is not None:
            options[u'authid'] = self.authid
        if self.authrole is not None:
            options[u'authrole'] = self.authrole
        if self.authmethod is not None:
            options[u'authmethod'] = self.authmethod

        ## note: empty args ([]) / kwargs ({}) are falsy and get dropped from
        ## the wire format just like None
        if self.kwargs:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
        elif self.args:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
        else:
            return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## allowed interrupt modes
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## the serializer layer has already checked the message type code
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## validate the optional 'mode' option if present
        mode = None
        if u'mode' in options:
            candidate = options[u'mode']
            if type(candidate) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(candidate)))
            if candidate not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(candidate))
            mode = candidate

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        template = "WAMP INTERRUPT Message (request = {0}, mode = '{1}')"
        return template.format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises ProtocolError: If the raw message does not conform to the YIELD wire format.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            ## wire format carries positional results as a list only
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        ## optional 'progress' option: bool
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))

            progress = option_progress

        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}

        if self.progress is not None:
            options[u'progress'] = self.progress

        ## note: empty args ([]) / kwargs ({}) are falsy and get dropped from
        ## the wire format just like None
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
__init__
|
:param request_type: The WAMP message type code for the original request.
:type request_type: int
:param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
:type request: int
:param error: The WAMP or application error URI for the error that occurred.
:type error: unicode
:param args: Positional values for application-defined exception.
Must be serializable using any serializers in use.
:type args: list or None
:param kwargs: Keyword values for application-defined exception.
Must be serializable using any serializers in use.
:type kwargs: dict or None
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## explicit public API of this module: the abstract `Message` base class
## plus one concrete class per WAMP message type
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## Pre-compiled URI validation patterns. "strict" restricts URI components
## to lowercase alphanumerics/underscore of length >= 2; "loose" merely
## forbids whitespace, '.' and '#' inside a component. Only the loose,
## empty-component-allowing variant is used by the checks in this module;
## the other variants are presumably consumed elsewhere — TODO confirm.

## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")

## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")

## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")

## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate `value` as a WAMP URI (unicode, matching the loose URI pattern).

    :param value: The value to validate.
    :param message: Context string prefixed to any error raised.

    :returns: `value` unchanged when valid.
    :raises: ProtocolError on wrong type or non-matching URI.
    """
    if type(value) == six.text_type:
        if _URI_PAT_LOOSE.match(value):
            return value
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
def check_or_raise_id(value, message):
    """
    Validate `value` as a WAMP ID (an integer in [0, 2**53]).

    :param value: The value to validate.
    :param message: Context string prefixed to any error raised.

    :returns: `value` unchanged when valid.
    :raises: ProtocolError on wrong type or out-of-range value.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## WAMP IDs must fit losslessly into an IEEE double: [0, 2**53]
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate `value` as a WAMP "extra" dict (all keys must be unicode).

    :param value: The value to validate.
    :param message: Context string prefixed to any error raised.

    :returns: `value` unchanged when valid.
    :raises: ProtocolError on wrong container type or non-unicode key.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## serialization cache: mapping from ISerializer instances
        ## to the serialized bytes for this message
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations (e.g. after the message changed)
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## only serialize if not cached ..
        ## (fixed idiom: `serializer not in` instead of `not serializer in`)
        if serializer not in self._serialized:
            self._serialized[serializer] = serializer.serialize(self.marshal())
        return self._serialized[serializer]


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)

        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))

        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")

        roles = []

        ## 'roles' is the only mandatory attribute in HELLO details
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")

        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")

        ## reconstruct one RoleFeatures instance per announced role,
        ## passing any announced 'features' as constructor kwargs
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))

            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))

            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))

            authmethods = details_authmethods

        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid

        obj = Hello(realm, roles, authmethods, authid)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            ## announce only features that were explicitly set (non-None),
            ## skipping private attributes and the ROLE marker itself
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        if self.authmethods:
            details[u'authmethods'] = self.authmethods

        if self.authid:
            details[u'authid'] = self.authid

        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)

        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))

        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")

        ## NOTE(review): unlike HELLO.parse, the auth details are not
        ## type-checked here — presumably trusted from the router; confirm.
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)

        roles = []

        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")

        ## normalized key to u'roles' for consistency with the rest of the module
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")

        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")

        ## reconstruct one RoleFeatures instance per announced role
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))

            role_cls = ROLE_NAME_TO_CLASS[role]

            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))

                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])

            else:
                role_features = role_cls()

            roles.append(role_features)

        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid

        if self.authrole:
            details[u'authrole'] = self.authrole

        ## BUGFIX: this was guarded by `if self.authrole:` (copy-paste error),
        ## which dropped 'authmethod' whenever authrole was unset and emitted
        ## authmethod = None when authrole was set but authmethod was not.
        if self.authmethod:
            details[u'authmethod'] = self.authmethod

        if self.authprovider:
            details[u'authprovider'] = self.authprovider

        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)

        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## the only detail currently understood: optional closing message
        message = None
        if u'message' in details:
            msg_detail = details[u'message']
            if type(msg_detail) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(msg_detail)))
            message = msg_detail

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fmt = "WAMP ABORT Message (message = {0}, reason = {1})"
        return fmt.format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.method = method
        ## normalize a missing extra dict to an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fmt = "WAMP CHALLENGE Message (method = {0}, extra = {1})"
        return fmt.format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)

        Message.__init__(self)
        self.signature = signature
        ## normalize a missing extra dict to an empty one
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fmt = "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})"
        return fmt.format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)

        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## the only detail currently understood: optional closing message
        message = None
        if u'message' in details:
            closing_message = details[u'message']
            if type(closing_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(closing_message)))
            message = closing_message

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fmt = "WAMP GOODBYE Message (message = {0}, reason = {1})"
        return fmt.format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``

    Note the asymmetric validation below: ``incoming`` may be 0 (nothing
    processed yet) while ``outgoing`` must be strictly positive.
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)

        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)

        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))

        incoming = wmsg[1]

        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))

        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))

        outgoing = wmsg[2]

        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))

        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))

        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))

        obj = Heartbeat(incoming, outgoing, discard = discard)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the discard payload element is only included when non-empty
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
    """
    A WAMP ``ERROR`` message.

    Formats:

    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
    * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 8
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        ## restored constructor (was missing from this revision); attribute
        ## set matches what parse/marshal/__str__ below read and write
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)

        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)

        if len(wmsg) not in (5, 6, 7):
            raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))

        request_type = wmsg[1]
        if type(request_type) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))

        ## an ERROR may only refer to a request-style message
        if request_type not in [Subscribe.MESSAGE_TYPE,
                                Unsubscribe.MESSAGE_TYPE,
                                Publish.MESSAGE_TYPE,
                                Register.MESSAGE_TYPE,
                                Unregister.MESSAGE_TYPE,
                                Call.MESSAGE_TYPE,
                                Invocation.MESSAGE_TYPE]:
            raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))

        request = check_or_raise_id(wmsg[2], "'request' in ERROR")

        ## details are validated but currently unused
        _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")

        error = check_or_raise_uri(wmsg[4], "'error' in ERROR")

        args = None
        if len(wmsg) > 5:
            args = wmsg[5]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))

        kwargs = None
        if len(wmsg) > 6:
            kwargs = wmsg[6]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))

        obj = Error(request_type, request, error, args = args, kwargs = kwargs)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## no error details are currently emitted
        details = {}

        if self.kwargs:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
        elif self.args:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
        else:
            return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
    """
    A WAMP ``PUBLISH`` message.

    Formats:

    * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
    * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 16
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 topic,
                 args = None,
                 kwargs = None,
                 acknowledge = None,
                 excludeMe = None,
                 exclude = None,
                 eligible = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic the event should
           be published to.
        :type topic: unicode
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param acknowledge: If True, acknowledge the publication with a success or
           error response.
        :type acknowledge: bool or None
        :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
           if he is subscribed (and eligible).
        :type excludeMe: bool or None
        :param exclude: List of WAMP session IDs to exclude from receiving this event.
        :type exclude: list of int or None
        :param eligible: List of WAMP session IDs eligible to receive this event.
        :type eligible: list of int or None
        :param discloseMe: If True, request to disclose the publisher of this event
           to subscribers.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(acknowledge is None or type(acknowledge) == bool)
        assert(excludeMe is None or type(excludeMe) == bool)
        assert(exclude is None or type(exclude) == list)
        assert(eligible is None or type(eligible) == list)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.args = args
        self.kwargs = kwargs
        self.acknowledge = acknowledge
        self.excludeMe = excludeMe
        self.exclude = exclude
        self.eligible = eligible
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
        options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
        topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))

        ## each option is validated independently; unknown options are ignored
        acknowledge = None
        excludeMe = None
        exclude = None
        eligible = None
        discloseMe = None

        if u'acknowledge' in options:
            option_acknowledge = options[u'acknowledge']
            if type(option_acknowledge) != bool:
                raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
            acknowledge = option_acknowledge

        if u'exclude_me' in options:
            option_excludeMe = options[u'exclude_me']
            if type(option_excludeMe) != bool:
                raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
            excludeMe = option_excludeMe

        if u'exclude' in options:
            option_exclude = options[u'exclude']
            if type(option_exclude) != list:
                raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
            for sessionId in option_exclude:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
            exclude = option_exclude

        if u'eligible' in options:
            option_eligible = options[u'eligible']
            if type(option_eligible) != list:
                raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
            for sessionId in option_eligible:
                if type(sessionId) not in six.integer_types:
                    raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
            eligible = option_eligible

        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Publish(request,
                      topic,
                      args = args,
                      kwargs = kwargs,
                      acknowledge = acknowledge,
                      excludeMe = excludeMe,
                      exclude = exclude,
                      eligible = eligible,
                      discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options explicitly set (non-None) are put on the wire
        options = {}

        if self.acknowledge is not None:
            options[u'acknowledge'] = self.acknowledge
        if self.excludeMe is not None:
            options[u'exclude_me'] = self.excludeMe
        if self.exclude is not None:
            options[u'exclude'] = self.exclude
        if self.eligible is not None:
            options[u'eligible'] = self.eligible
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## trailing payload elements only when non-empty
        if self.kwargs:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
        elif self.args:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
        else:
            return [Publish.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
    """
    A WAMP ``PUBLISHED`` message.

    Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
    """

    MESSAGE_TYPE = 17
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, publication):
        """
        :param request: The request ID of the original `PUBLISH` request.
        :type request: int
        :param publication: The publication ID for the published event.
        :type publication: int
        """
        assert(type(request) in six.integer_types)
        assert(type(publication) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.publication = publication

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
        publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")

        return Published(request, publication)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Published.MESSAGE_TYPE, self.request, self.publication]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        fmt = "WAMP PUBLISHED Message (request = {0}, publication = {1})"
        return fmt.format(self.request, self.publication)
class Subscribe(Message):
    """
    A WAMP ``SUBSCRIBE`` message.

    Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
    """

    MESSAGE_TYPE = 32
    """
    The WAMP message code for this type of message.
    """

    ## Topic matching policies a subscription may request.
    MATCH_EXACT = u'exact'
    MATCH_PREFIX = u'prefix'
    MATCH_WILDCARD = u'wildcard'

    def __init__(self, request, topic, match = MATCH_EXACT):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
        :type topic: unicode
        :param match: The topic matching method to be used for the subscription.
        :type match: unicode
        """
        assert(type(request) in six.integer_types)
        assert(type(topic) == six.text_type)
        assert(match is None or type(match) == six.text_type)
        assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])

        Message.__init__(self)
        self.request = request
        self.topic = topic
        self.match = match

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
        options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
        topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")

        ## 'exact' matching applies unless the options say otherwise
        match = Subscribe.MATCH_EXACT
        if u'match' in options:
            match = options[u'match']
            if type(match) != six.text_type:
                raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(match)))
            if match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
                raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(match))

        return Subscribe(request, topic, match)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        ## the default ('exact') policy is implied and not serialized
        if self.match and self.match != Subscribe.MATCH_EXACT:
            options[u'match'] = self.match
        return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
    """
    A WAMP ``SUBSCRIBED`` message, acknowledging a ``SUBSCRIBE`` request.

    Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
    """

    MESSAGE_TYPE = 33
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The request ID of the original ``SUBSCRIBE`` request.
        :type request: int
        :param subscription: The subscription ID for the subscribed topic (or topic pattern).
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request, self.subscription = request, subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))

        return Subscribed(check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED"),
                          check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
    """
    A WAMP ``UNSUBSCRIBE`` message.

    Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
    """

    MESSAGE_TYPE = 34
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, subscription):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param subscription: The subscription ID for the subscription to unsubscribe from.
        :type subscription: int
        """
        assert(type(request) in six.integer_types)
        assert(type(subscription) in six.integer_types)

        Message.__init__(self)
        self.request, self.subscription = request, subscription

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))

        return Unsubscribe(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE"),
                           check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
    """
    A WAMP ``UNSUBSCRIBED`` message, acknowledging an ``UNSUBSCRIBE`` request.

    Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
    """

    MESSAGE_TYPE = 35
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNSUBSCRIBE`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)

        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))

        return Unsubscribed(check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unsubscribed.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
    """
    A WAMP ``EVENT`` message, dispatching a publication to a subscriber.

    Formats:

    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
    * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 36
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
        """
        :param subscription: The subscription ID this event is dispatched under.
        :type subscription: int
        :param publication: The publication ID of the dispatched event.
        :type publication: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param publisher: If present, the WAMP session ID of the publisher of this event.
        :type publisher: int or None
        """
        assert(type(subscription) in six.integer_types)
        assert(type(publication) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(publisher is None or type(publisher) in six.integer_types)

        Message.__init__(self)
        self.subscription = subscription
        self.publication = publication
        self.args = args
        self.kwargs = kwargs
        self.publisher = publisher

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a well-formed EVENT.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)

        ## 4, 5 or 6 elements, depending on whether args/kwargs payload is present
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))

        subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
        publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
        details = check_or_raise_extra(wmsg[3], "'details' in EVENT")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))

        ## optional keyword payload (can only appear at index 5, i.e. together with args)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))

        ## optional 'publisher' detail: WAMP session ID of the publishing session
        publisher = None
        if u'publisher' in details:
            detail_publisher = details[u'publisher']
            if type(detail_publisher) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
            publisher = detail_publisher

        obj = Event(subscription,
                    publication,
                    args = args,
                    kwargs = kwargs,
                    publisher = publisher)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.publisher is not None:
            details[u'publisher'] = self.publisher

        ## kwargs cannot be serialized without args also being present
        if self.kwargs:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
        elif self.args:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
        else:
            return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
    """
    A WAMP ``CALL`` message, requesting invocation of a remote procedure.

    Formats:

    * ``[CALL, Request|id, Options|dict, Procedure|uri]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
    * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 48
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 procedure,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 discloseMe = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the procedure which should be called.
        :type procedure: unicode
        :param args: Positional values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined call arguments.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancel
           the call after this ms.
        :type timeout: int or None
        :param receive_progress: If ``True``, indicates that the caller wants to receive
           progressive call results.
        :type receive_progress: bool or None
        :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
        :type discloseMe: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(discloseMe is None or type(discloseMe) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.discloseMe = discloseMe

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a well-formed CALL.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)

        ## 4, 5 or 6 elements, depending on whether args/kwargs payload is present
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CALL")
        options = check_or_raise_extra(wmsg[2], "'options' in CALL")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")

        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))

        ## optional keyword payload (can only appear together with args)
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))

        ## optional 'timeout' option: must be a non-negative integer (milliseconds)
        timeout = None
        if u'timeout' in options:
            option_timeout = options[u'timeout']
            if type(option_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
            if option_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
            timeout = option_timeout

        ## optional 'receive_progress' option: caller wants progressive results
        receive_progress = None
        if u'receive_progress' in options:
            option_receive_progress = options[u'receive_progress']
            if type(option_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
            receive_progress = option_receive_progress

        ## optional 'disclose_me' option: caller requests disclosure to callee
        discloseMe = None
        if u'disclose_me' in options:
            option_discloseMe = options[u'disclose_me']
            if type(option_discloseMe) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
            discloseMe = option_discloseMe

        obj = Call(request,
                   procedure,
                   args = args,
                   kwargs = kwargs,
                   timeout = timeout,
                   receive_progress = receive_progress,
                   discloseMe = discloseMe)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options explicitly set are serialized
        options = {}

        if self.timeout is not None:
            options[u'timeout'] = self.timeout
        if self.receive_progress is not None:
            options[u'receive_progress'] = self.receive_progress
        if self.discloseMe is not None:
            options[u'disclose_me'] = self.discloseMe

        ## kwargs cannot be serialized without args also being present
        if self.kwargs:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
        elif self.args:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
        else:
            return [Call.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
    """
    A WAMP ``CANCEL`` message, requesting cancellation of an outstanding call.

    Format: ``[CANCEL, CALL.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 49
    """
    The WAMP message code for this type of message.
    """

    ## The cancellation modes a caller may request.
    SKIP = u'skip'
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original `CALL` to cancel.
        :type request: int
        :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode in [None, self.SKIP, self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a well-formed CANCEL.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
        options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")

        ## options
        ##
        ## optional 'mode' option: must be one of the three known cancellation modes
        mode = None
        if u'mode' in options:
            option_mode = options[u'mode']
            if type(option_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
            if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
            mode = option_mode

        obj = Cancel(request, mode = mode)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.mode is not None:
            options[u'mode'] = self.mode
        return [Cancel.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: the format string previously contained a stray trailing quote
        ## ("mode = '{1}'')"), producing a malformed representation.
        return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
    """
    A WAMP ``RESULT`` message, returning the (possibly progressive) outcome of a call.

    Formats:

    * ``[RESULT, CALL.Request|id, Details|dict]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
    * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 50
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The request ID of the original `CALL` request.
        :type request: int
        :param args: Positional values for application-defined result payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined result payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive call result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a well-formed RESULT.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)

        ## 3, 4 or 5 elements, depending on whether args/kwargs payload is present
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in RESULT")
        details = check_or_raise_extra(wmsg[2], "'details' in RESULT")

        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))

        ## optional keyword payload (can only appear together with args)
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))

        ## optional 'progress' detail: marks this as a progressive (non-final) result
        progress = None
        if u'progress' in details:
            detail_progress = details[u'progress']
            if type(detail_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
            progress = detail_progress

        obj = Result(request, args = args, kwargs = kwargs, progress = progress)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {}
        if self.progress is not None:
            details[u'progress'] = self.progress

        ## kwargs cannot be serialized without args also being present
        if self.kwargs:
            return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
        elif self.args:
            return [Result.MESSAGE_TYPE, self.request, details, self.args]
        else:
            return [Result.MESSAGE_TYPE, self.request, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message, registering an RPC endpoint with the router.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            ## each partition key must itself be an integer
            for k in pkeys:
                assert(type(k) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a well-formed REGISTER.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        discloseCaller = None

        ## optional 'pkeys' option: list of integer partition keys
        if u'pkeys' in options:
            option_pkeys = options[u'pkeys']
            if type(option_pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
            for pk in option_pkeys:
                if type(pk) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
            pkeys = option_pkeys

        ## optional 'disclose_caller' option: callee requests caller identity disclosure
        if u'disclose_caller' in options:
            option_discloseCaller = options[u'disclose_caller']
            if type(option_discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
            discloseCaller = option_discloseCaller

        obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only options explicitly set are serialized
        options = {}

        if self.pkeys is not None:
            options[u'pkeys'] = self.pkeys
        if self.discloseCaller is not None:
            options[u'disclose_caller'] = self.discloseCaller

        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message, acknowledging a ``REGISTER`` request.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request, self.registration = request, registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request, self.registration = request, registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message, acknowledging an ``UNREGISTER`` request.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        :raises: ProtocolError, if the raw message is not a well-formed UNREGISTERED.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        ## FIX: error messages previously said "UNREGISTER" although this parses
        ## an UNREGISTERED message (cf. the sibling Unsubscribed class, which
        ## correctly names itself in its diagnostics).
        if len(wmsg) != 2:
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: previously labeled "WAMP UNREGISTER Message" — wrong message name.
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
"""
A WAMP ``INVOCATION`` message.
Formats:
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 68
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
registration,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
caller = None,
authid = None,
authrole = None,
authmethod = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID of the endpoint to be invoked.
:type registration: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param timeout: If present, let the callee automatically cancels
the invocation after this ms.
:type timeout: int or None
:param receive_progress: Indicates if the callee should produce progressive results.
:type receive_progress: bool or None
:param caller: The WAMP session ID of the caller.
:type caller: int or None
:param authid: The authentication ID of the caller.
:type authid: unicode or None
:param authrole: The authentication role of the caller.
:type authrole: unicode or None
:param authmethod: The authentication method under which the caller was authenticated.
:type authmethod: unicode or None
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(caller is None or type(caller) in six.integer_types)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
Message.__init__(self)
self.request = request
self.registration = registration
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError if *wmsg* is not a well-formed INVOCATION message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
        ## valid shapes: [type, request, registration, details(, args(, kwargs))]
        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
        ## optional positional payload
        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
        ## 'timeout' detail: auto-cancel interval in ms — must be a non-negative integer
        timeout = None
        if u'timeout' in details:
            detail_timeout = details[u'timeout']
            if type(detail_timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
            if detail_timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
            timeout = detail_timeout
        ## 'receive_progress' detail: whether the callee should produce progressive results
        receive_progress = None
        if u'receive_progress' in details:
            detail_receive_progress = details[u'receive_progress']
            if type(detail_receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
            receive_progress = detail_receive_progress
        ## 'caller' detail: WAMP session ID of the caller
        caller = None
        if u'caller' in details:
            detail_caller = details[u'caller']
            if type(detail_caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
            caller = detail_caller
        ## 'authid' detail: authentication ID of the caller
        authid = None
        if u'authid' in details:
            detail_authid = details[u'authid']
            if type(detail_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
            authid = detail_authid
        ## 'authrole' detail: authentication role of the caller
        authrole = None
        if u'authrole' in details:
            detail_authrole = details[u'authrole']
            if type(detail_authrole) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
            authrole = detail_authrole
        ## 'authmethod' detail: authentication method under which the caller was authenticated
        authmethod = None
        if u'authmethod' in details:
            detail_authmethod = details[u'authmethod']
            if type(detail_authmethod) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
            authmethod = detail_authmethod
        obj = Invocation(request,
                         registration,
                         args = args,
                         kwargs = kwargs,
                         timeout = timeout,
                         receive_progress = receive_progress,
                         caller = caller,
                         authid = authid,
                         authrole = authrole,
                         authmethod = authmethod)
        return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.caller is not None:
options[u'caller'] = self.caller
if self.authid is not None:
options[u'authid'] = self.authid
if self.authrole is not None:
options[u'authrole'] = self.authrole
if self.authmethod is not None:
options[u'authmethod'] = self.authmethod
if self.kwargs:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
elif self.args:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
else:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ## the two interruption modes understood by this message
    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])
        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## optional 'mode' option: must be text and one of the two known modes
        mode = None
        if u'mode' in options:
            raw_mode = options[u'mode']
            if type(raw_mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(raw_mode)))
            if raw_mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(raw_mode))
            mode = raw_mode

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {u'mode': self.mode} if self.mode is not None else {}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)
        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError if *wmsg* is not a well-formed YIELD message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
        ## valid shapes: [type, request, options(, args(, kwargs))]
        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
        ## optional positional payload
        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
        ## optional keyword payload
        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
        ## optional 'progress' option: marks a progressive (non-final) result
        progress = None
        if u'progress' in options:
            option_progress = options[u'progress']
            if type(option_progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
            progress = option_progress
        obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress
        ## NOTE: truthiness checks — an empty args/kwargs payload is serialized
        ## the same as an absent one
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        elif self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        else:
            return [Yield.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
|
    ## NOTE(review): this method sits outside any visible class body at this
    ## point in the file and duplicates ``Error.__init__`` defined further down —
    ## presumably a stray fragment from a bad merge/concatenation; confirm
    ## whether it should be removed.
    def __init__(self, request_type, request, error, args = None, kwargs = None):
        """
        :param request_type: The WAMP message type code for the original request.
        :type request_type: int
        :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
        :type request: int
        :param error: The WAMP or application error URI for the error that occurred.
        :type error: unicode
        :param args: Positional values for application-defined exception.
           Must be serializable using any serializers in use.
        :type args: list or None
        :param kwargs: Keyword values for application-defined exception.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        """
        ## argument type checks (stripped when Python runs with -O)
        assert(type(request_type) in six.integer_types)
        assert(type(request) in six.integer_types)
        assert(type(error) == six.text_type)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        Message.__init__(self)
        self.request_type = request_type
        self.request = request
        self.error = error
        self.args = args
        self.kwargs = kwargs
| 854
| 881
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
## Explicit public API of this module: the WAMP message base class plus one
## class per WAMP message type.
__all__ = ('Message',
           'Hello',
           'Welcome',
           'Abort',
           'Challenge',
           'Authenticate',
           'Goodbye',
           'Heartbeat',
           'Error',
           'Publish',
           'Published',
           'Subscribe',
           'Subscribed',
           'Unsubscribe',
           'Unsubscribed',
           'Event',
           'Call',
           'Cancel',
           'Result',
           'Register',
           'Registered',
           'Unregister',
           'Unregistered',
           'Invocation',
           'Interrupt',
           'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
## (strict components: lowercase letters, digits and underscore, length >= 2)
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
## (loose components: any characters except whitespace, '.' and '#')
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
    """
    Validate that *value* is a text WAMP URI matching the loose URI pattern
    and return it unchanged.

    :raises: ProtocolError (prefixed with *message*) on type or value mismatch.
    """
    if type(value) != six.text_type:
        raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
    if _URI_PAT_LOOSE.match(value) is None:
        raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
    return value
def check_or_raise_id(value, message):
    """
    Validate that *value* is a legal WAMP ID (an integer in ``[0, 2**53]``)
    and return it unchanged.

    :raises: ProtocolError (prefixed with *message*) on type or range mismatch.
    """
    if type(value) not in six.integer_types:
        raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
    ## 2**53: largest integer exactly representable as an IEEE-754 double (JSON)
    if not (0 <= value <= 9007199254740992):
        raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
    return value
def check_or_raise_extra(value, message):
    """
    Validate that *value* is a dict whose keys are all text strings and
    return it unchanged.

    :raises: ProtocolError (prefixed with *message*) on type mismatch.
    """
    if type(value) != dict:
        raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
    for key in value:
        if type(key) != six.text_type:
            raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(key), key))
    return value
class Message(util.EqualityMixin):
    """
    WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.

    .. note:: This is not supposed to be instantiated.
    """

    def __init__(self):
        ## per-serializer cache: maps ISerializer instance -> serialized bytes
        self._serialized = {}

    def uncache(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
        """
        ## drop all cached serializations
        self._serialized = {}

    def serialize(self, serializer):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
        """
        ## serialize lazily and memoize the result per serializer
        try:
            return self._serialized[serializer]
        except KeyError:
            data = serializer.serialize(self.marshal())
            self._serialized[serializer] = data
            return data


IMessage.register(Message)
class Hello(Message):
    """
    A WAMP ``HELLO`` message.

    Format: ``[HELLO, Realm|uri, Details|dict]``
    """

    MESSAGE_TYPE = 1
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, realm, roles, authmethods = None, authid = None):
        """
        :param realm: The URI of the WAMP realm to join.
        :type realm: unicode
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authmethods: The authentication methods to announce.
        :type authmethods: list of unicode or None
        :param authid: The authentication ID to announce.
        :type authid: unicode or None
        """
        assert(type(realm) == six.text_type)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        if authmethods:
            assert(type(authmethods) == list)
            for authmethod in authmethods:
                assert(type(authmethod) == six.text_type)
        assert(authid is None or type(authid) == six.text_type)
        Message.__init__(self)
        self.realm = realm
        self.roles = roles
        self.authmethods = authmethods
        self.authid = authid

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError if *wmsg* is not a well-formed HELLO message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
        realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
        details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
        ## 'roles' detail is mandatory and must announce at least one known role
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in HELLO")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in HELLO")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
            if u'features' in details_role:
                check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_role[u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        ## optional 'authmethods' detail: announced authentication methods
        authmethods = None
        if u'authmethods' in details:
            details_authmethods = details[u'authmethods']
            if type(details_authmethods) != list:
                raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
            for auth_method in details_authmethods:
                if type(auth_method) != six.text_type:
                    raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
            authmethods = details_authmethods
        ## optional 'authid' detail: announced authentication ID
        authid = None
        if u'authid' in details:
            details_authid = details[u'authid']
            if type(details_authid) != six.text_type:
                raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
            authid = details_authid
        obj = Hello(realm, roles, authmethods, authid)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'roles': {}}
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce features that were explicitly set (non-private,
                ## non-'ROLE', non-None attributes of the role object)
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        if self.authmethods:
            details[u'authmethods'] = self.authmethods
        if self.authid:
            details[u'authid'] = self.authid
        return [Hello.MESSAGE_TYPE, self.realm, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
    """
    A WAMP ``WELCOME`` message.

    Format: ``[WELCOME, Session|id, Details|dict]``
    """

    MESSAGE_TYPE = 2
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
        """
        :param session: The WAMP session ID the other peer is assigned.
        :type session: int
        :param roles: The WAMP roles to announce.
        :type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
        :param authid: The authentication ID assigned.
        :type authid: unicode or None
        :param authrole: The authentication role assigned.
        :type authrole: unicode or None
        :param authmethod: The authentication method in use.
        :type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
        :type authprovider: unicode or None
        """
        assert(type(session) in six.integer_types)
        assert(type(roles) == list)
        for role in roles:
            assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)
        assert(authprovider is None or type(authprovider) == six.text_type)
        Message.__init__(self)
        self.session = session
        self.roles = roles
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError if *wmsg* is not a well-formed WELCOME message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
        session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
        details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
        ## NOTE(review): unlike HELLO parsing, these four auth details are passed
        ## through without type validation — presumably deliberate leniency; confirm.
        authid = details.get(u'authid', None)
        authrole = details.get(u'authrole', None)
        authmethod = details.get(u'authmethod', None)
        authprovider = details.get(u'authprovider', None)
        ## 'roles' detail is mandatory and must announce at least one known role
        roles = []
        if not u'roles' in details:
            raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
        details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in WELCOME")
        if len(details_roles) == 0:
            raise ProtocolError("empty 'roles' in 'details' in WELCOME")
        for role in details_roles:
            if role not in ROLE_NAME_TO_CLASS:
                raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
            role_cls = ROLE_NAME_TO_CLASS[role]
            if u'features' in details_roles[role]:
                check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
                ## FIXME: skip unknown attributes
                role_features = role_cls(**details_roles[role][u'features'])
            else:
                role_features = role_cls()
            roles.append(role_features)
        obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {
            u'roles': {}
        }
        if self.authid:
            details[u'authid'] = self.authid
        if self.authrole:
            details[u'authrole'] = self.authrole
        ## FIXED: this was guarded by `if self.authrole:` (copy-paste bug), which
        ## dropped 'authmethod' when authrole was unset and could emit
        ## authmethod = None when authrole was set but authmethod was not
        if self.authmethod:
            details[u'authmethod'] = self.authmethod
        if self.authprovider:
            details[u'authprovider'] = self.authprovider
        for role in self.roles:
            details[u'roles'][role.ROLE] = {}
            for feature in role.__dict__:
                ## only announce features that were explicitly set
                if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
                    if not u'features' in details[u'roles'][role.ROLE]:
                        details[u'roles'][role.ROLE] = {u'features': {}}
                    details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
        return [Welcome.MESSAGE_TYPE, self.session, details]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
    """
    A WAMP ``ABORT`` message.

    Format: ``[ABORT, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 3
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, reason, message = None):
        """
        :param reason: WAMP or application error URI for aborting reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
        reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")

        ## optional human-readable message detail
        message = None
        if u'message' in details:
            raw_message = details[u'message']
            if type(raw_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(raw_message)))
            message = raw_message

        return Abort(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Abort.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra = None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(method) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.method = method
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))

        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")

        return Challenge(method, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra = None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert(type(signature) == six.text_type)
        assert(extra is None or type(extra) == dict)
        Message.__init__(self)
        self.signature = signature
        ## normalize missing extra to an empty dict
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))

        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))

        extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")

        return Authenticate(signature, extra)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.goodbye.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason = DEFAULT_REASON, message = None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        """
        assert(type(reason) == six.text_type)
        assert(message is None or type(message) == six.text_type)
        Message.__init__(self)
        self.reason = reason
        self.message = message

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))

        details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")

        ## optional human-readable message detail
        message = None
        if u'message' in details:
            raw_message = details[u'message']
            if type(raw_message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(raw_message)))
            message = raw_message

        return Goodbye(reason, message)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        details = {u'message': self.message} if self.message else {}
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
    """
    A WAMP ``HEARTBEAT`` message.

    Formats:

    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
    * ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
    """

    MESSAGE_TYPE = 7
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, incoming, outgoing, discard = None):
        """
        :param incoming: Last incoming heartbeat processed from peer.
        :type incoming: int
        :param outgoing: Outgoing heartbeat.
        :type outgoing: int
        :param discard: Optional data that is discarded by peer.
        :type discard: unicode or None
        """
        assert(type(incoming) in six.integer_types)
        assert(type(outgoing) in six.integer_types)
        assert(discard is None or type(discard) == six.text_type)
        Message.__init__(self)
        self.incoming = incoming
        self.outgoing = outgoing
        self.discard = discard

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        :raises: ProtocolError if *wmsg* is not a well-formed HEARTBEAT message.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
        if len(wmsg) not in [3, 4]:
            raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
        incoming = wmsg[1]
        if type(incoming) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
        if incoming < 0: # must be non-negative
            raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
        outgoing = wmsg[2]
        if type(outgoing) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
        if outgoing <= 0: # must be positive
            raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
        ## optional discard payload (text, discarded by the peer)
        discard = None
        if len(wmsg) > 3:
            discard = wmsg[3]
            if type(discard) != six.text_type:
                raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
        obj = Heartbeat(incoming, outgoing, discard = discard)
        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## the discard element is only included when set (truthy)
        if self.discard:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
        else:
            return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## only the length of the discard payload is logged, not its content
        return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
   """
   A WAMP ``ERROR`` message.
   Formats:
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
   * ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 8
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request_type, request, error, args = None, kwargs = None):
      """
      :param request_type: The WAMP message type code for the original request.
      :type request_type: int
      :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
      :type request: int
      :param error: The WAMP or application error URI for the error that occurred.
      :type error: unicode
      :param args: Positional values for application-defined exception.
                   Must be serializable using any serializers in use.
      :type args: list or None
      :param kwargs: Keyword values for application-defined exception.
                     Must be serializable using any serializers in use.
      :type kwargs: dict or None
      """
      assert(type(request_type) in six.integer_types)
      assert(type(request) in six.integer_types)
      assert(type(error) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      Message.__init__(self)
      self.request_type = request_type
      self.request = request
      self.error = error
      self.args = args
      self.kwargs = kwargs
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
      if len(wmsg) not in (5, 6, 7):
         raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
      request_type = wmsg[1]
      if type(request_type) not in six.integer_types:
         ## FIX: report the offending *type* (not the raw value), consistent with
         ## every other "invalid type" ProtocolError raised in this module.
         raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(type(request_type)))
      ## an ERROR may only refer to a request type that actually solicits a reply
      if request_type not in [Subscribe.MESSAGE_TYPE,
                              Unsubscribe.MESSAGE_TYPE,
                              Publish.MESSAGE_TYPE,
                              Register.MESSAGE_TYPE,
                              Unregister.MESSAGE_TYPE,
                              Call.MESSAGE_TYPE,
                              Invocation.MESSAGE_TYPE]:
         raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
      request = check_or_raise_id(wmsg[2], "'request' in ERROR")
      ## details dict is validated but currently carries no recognized keys
      _ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
      error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
      args = None
      if len(wmsg) > 5:
         args = wmsg[5]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
      kwargs = None
      if len(wmsg) > 6:
         kwargs = wmsg[6]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
      obj = Error(request_type, request, error, args = args, kwargs = kwargs)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {}
      ## kwargs present forces args into the wire format too (may be None);
      ## args-only and bare forms use the shorter message variants
      if self.kwargs:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
      elif self.args:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
      else:
         return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
   """
   A WAMP ``PUBLISH`` message.
   Formats:
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
   * ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 16
   """
   The WAMP message code for this type of message.
   """
   def __init__(self,
                request,
                topic,
                args = None,
                kwargs = None,
                acknowledge = None,
                excludeMe = None,
                exclude = None,
                eligible = None,
                discloseMe = None):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param topic: The WAMP or application URI of the PubSub topic the event should
                    be published to.
      :type topic: unicode
      :param args: Positional values for application-defined event payload.
                   Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined event payload.
                     Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param acknowledge: If True, acknowledge the publication with a success or
                          error response.
      :type acknowledge: bool or None
      :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
                        if he is subscribed (and eligible).
      :type excludeMe: bool or None
      :param exclude: List of WAMP session IDs to exclude from receiving this event.
      :type exclude: list of int or None
      :param eligible: List of WAMP session IDs eligible to receive this event.
      :type eligible: list of int or None
      :param discloseMe: If True, request to disclose the publisher of this event
                         to subscribers.
      :type discloseMe: bool or None
      """
      assert(type(request) in six.integer_types)
      assert(type(topic) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      assert(acknowledge is None or type(acknowledge) == bool)
      assert(excludeMe is None or type(excludeMe) == bool)
      assert(exclude is None or type(exclude) == list)
      assert(eligible is None or type(eligible) == list)
      assert(discloseMe is None or type(discloseMe) == bool)
      Message.__init__(self)
      self.request = request
      self.topic = topic
      self.args = args
      self.kwargs = kwargs
      ## all options default to None meaning "not present on the wire"
      self.acknowledge = acknowledge
      self.excludeMe = excludeMe
      self.exclude = exclude
      self.eligible = eligible
      self.discloseMe = discloseMe
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
      ## 4 = bare, 5 = with args, 6 = with args + kwargs
      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
      request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
      options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
      topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
      ## options: each is optional; validate type (and element types for the
      ## session-ID lists) before accepting
      acknowledge = None
      excludeMe = None
      exclude = None
      eligible = None
      discloseMe = None
      if u'acknowledge' in options:
         option_acknowledge = options[u'acknowledge']
         if type(option_acknowledge) != bool:
            raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
         acknowledge = option_acknowledge
      if u'exclude_me' in options:
         option_excludeMe = options[u'exclude_me']
         if type(option_excludeMe) != bool:
            raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
         excludeMe = option_excludeMe
      if u'exclude' in options:
         option_exclude = options[u'exclude']
         if type(option_exclude) != list:
            raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
         for sessionId in option_exclude:
            if type(sessionId) not in six.integer_types:
               raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
         exclude = option_exclude
      if u'eligible' in options:
         option_eligible = options[u'eligible']
         if type(option_eligible) != list:
            raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
         for sessionId in option_eligible:
            if type(sessionId) not in six.integer_types:
               raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
         eligible = option_eligible
      if u'disclose_me' in options:
         option_discloseMe = options[u'disclose_me']
         if type(option_discloseMe) != bool:
            raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
         discloseMe = option_discloseMe
      obj = Publish(request,
                    topic,
                    args = args,
                    kwargs = kwargs,
                    acknowledge = acknowledge,
                    excludeMe = excludeMe,
                    exclude = exclude,
                    eligible = eligible,
                    discloseMe = discloseMe)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## only options that were explicitly set are serialized
      options = {}
      if self.acknowledge is not None:
         options[u'acknowledge'] = self.acknowledge
      if self.excludeMe is not None:
         options[u'exclude_me'] = self.excludeMe
      if self.exclude is not None:
         options[u'exclude'] = self.exclude
      if self.eligible is not None:
         options[u'eligible'] = self.eligible
      if self.discloseMe is not None:
         options[u'disclose_me'] = self.discloseMe
      ## choose the shortest wire format that carries the payload present
      if self.kwargs:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
      elif self.args:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
      else:
         return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
   """
   A WAMP ``PUBLISHED`` message.
   Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
   """
   MESSAGE_TYPE = 17
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request, publication):
      """
      :param request: The request ID of the original ``PUBLISH`` request.
      :type request: int
      :param publication: The publication ID for the published event.
      :type publication: int
      """
      assert(type(request) in six.integer_types)
      assert(type(publication) in six.integer_types)
      Message.__init__(self)
      self.request = request
      self.publication = publication
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
      ## both payload fields are plain WAMP IDs
      req_id = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
      pub_id = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")
      return Published(req_id, pub_id)
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Published.MESSAGE_TYPE, self.request, self.publication]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
   """
   A WAMP ``SUBSCRIBE`` message.
   Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
   """
   MESSAGE_TYPE = 32
   """
   The WAMP message code for this type of message.
   """
   ## supported topic matching policies
   MATCH_EXACT = u'exact'
   MATCH_PREFIX = u'prefix'
   MATCH_WILDCARD = u'wildcard'
   def __init__(self, request, topic, match = MATCH_EXACT):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param topic: The WAMP or application URI of the PubSub topic to subscribe to.
      :type topic: unicode
      :param match: The topic matching method to be used for the subscription.
      :type match: unicode
      """
      assert(type(request) in six.integer_types)
      assert(type(topic) == six.text_type)
      assert(match is None or type(match) == six.text_type)
      assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
      Message.__init__(self)
      self.request = request
      self.topic = topic
      self.match = match
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
      if len(wmsg) != 4:
         raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
      req_id = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
      opts = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
      topic_uri = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
      ## matching policy defaults to exact when absent from options
      matching = Subscribe.MATCH_EXACT
      if u'match' in opts:
         candidate = opts[u'match']
         if type(candidate) != six.text_type:
            raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(candidate)))
         if candidate not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
            raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(candidate))
         matching = candidate
      return Subscribe(req_id, topic_uri, matching)
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## the default (exact) policy is left implicit on the wire
      opts = {}
      if self.match and self.match != Subscribe.MATCH_EXACT:
         opts[u'match'] = self.match
      return [Subscribe.MESSAGE_TYPE, self.request, opts, self.topic]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
   """
   A WAMP ``SUBSCRIBED`` message.
   Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
   """
   MESSAGE_TYPE = 33
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request, subscription):
      """
      :param request: The request ID of the original ``SUBSCRIBE`` request.
      :type request: int
      :param subscription: The subscription ID for the subscribed topic (or topic pattern).
      :type subscription: int
      """
      assert(type(request) in six.integer_types)
      assert(type(subscription) in six.integer_types)
      Message.__init__(self)
      self.request = request
      self.subscription = subscription
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
      req_id = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
      sub_id = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")
      return Subscribed(req_id, sub_id)
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
   """
   A WAMP ``UNSUBSCRIBE`` message.
   Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
   """
   MESSAGE_TYPE = 34
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request, subscription):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param subscription: The subscription ID for the subscription to unsubscribe from.
      :type subscription: int
      """
      assert(type(request) in six.integer_types)
      assert(type(subscription) in six.integer_types)
      Message.__init__(self)
      self.request = request
      self.subscription = subscription
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
      req_id = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
      sub_id = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")
      return Unsubscribe(req_id, sub_id)
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
   """
   A WAMP ``UNSUBSCRIBED`` message.
   Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
   """
   MESSAGE_TYPE = 35
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request):
      """
      :param request: The request ID of the original ``UNSUBSCRIBE`` request.
      :type request: int
      """
      assert(type(request) in six.integer_types)
      Message.__init__(self)
      self.request = request
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
      if len(wmsg) != 2:
         raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
      req_id = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")
      return Unsubscribed(req_id)
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      return [Unsubscribed.MESSAGE_TYPE, self.request]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
   """
   A WAMP ``EVENT`` message.
   Formats:
   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
   * ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 36
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
      """
      :param subscription: The subscription ID this event is dispatched under.
      :type subscription: int
      :param publication: The publication ID of the dispatched event.
      :type publication: int
      :param args: Positional values for application-defined exception.
                   Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined exception.
                     Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param publisher: If present, the WAMP session ID of the publisher of this event.
      :type publisher: int or None
      """
      assert(type(subscription) in six.integer_types)
      assert(type(publication) in six.integer_types)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      assert(publisher is None or type(publisher) in six.integer_types)
      Message.__init__(self)
      self.subscription = subscription
      self.publication = publication
      self.args = args
      self.kwargs = kwargs
      ## only set when the publisher chose (or was required) to be disclosed
      self.publisher = publisher
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
      ## 4 = bare, 5 = with args, 6 = with args + kwargs
      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
      subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
      publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
      details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
      ## the only recognized detail is the (optional) disclosed publisher session ID
      publisher = None
      if u'publisher' in details:
         detail_publisher = details[u'publisher']
         if type(detail_publisher) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
         publisher = detail_publisher
      obj = Event(subscription,
                  publication,
                  args = args,
                  kwargs = kwargs,
                  publisher = publisher)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {}
      if self.publisher is not None:
         details[u'publisher'] = self.publisher
      ## choose the shortest wire format that carries the payload present
      if self.kwargs:
         return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
      elif self.args:
         return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
      else:
         return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
   """
   A WAMP ``CALL`` message.
   Formats:
   * ``[CALL, Request|id, Options|dict, Procedure|uri]``
   * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
   * ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 48
   """
   The WAMP message code for this type of message.
   """
   def __init__(self,
                request,
                procedure,
                args = None,
                kwargs = None,
                timeout = None,
                receive_progress = None,
                discloseMe = None):
      """
      :param request: The WAMP request ID of this request.
      :type request: int
      :param procedure: The WAMP or application URI of the procedure which should be called.
      :type procedure: unicode
      :param args: Positional values for application-defined call arguments.
                   Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined call arguments.
                     Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param timeout: If present, let the callee automatically cancel
                      the call after this ms.
      :type timeout: int or None
      :param receive_progress: If ``True``, indicates that the caller wants to receive
                               progressive call results.
      :type receive_progress: bool or None
      :param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
      :type discloseMe: bool or None
      """
      assert(type(request) in six.integer_types)
      assert(type(procedure) == six.text_type)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      assert(timeout is None or type(timeout) in six.integer_types)
      assert(receive_progress is None or type(receive_progress) == bool)
      assert(discloseMe is None or type(discloseMe) == bool)
      Message.__init__(self)
      self.request = request
      self.procedure = procedure
      self.args = args
      self.kwargs = kwargs
      ## all options default to None meaning "not present on the wire"
      self.timeout = timeout
      self.receive_progress = receive_progress
      self.discloseMe = discloseMe
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
      ## 4 = bare, 5 = with args, 6 = with args + kwargs
      if len(wmsg) not in (4, 5, 6):
         raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
      request = check_or_raise_id(wmsg[1], "'request' in CALL")
      options = check_or_raise_extra(wmsg[2], "'options' in CALL")
      procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
      args = None
      if len(wmsg) > 4:
         args = wmsg[4]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
      kwargs = None
      if len(wmsg) > 5:
         kwargs = wmsg[5]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
      ## options: each is optional; timeout additionally must be non-negative
      timeout = None
      if u'timeout' in options:
         option_timeout = options[u'timeout']
         if type(option_timeout) not in six.integer_types:
            raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
         if option_timeout < 0:
            raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
         timeout = option_timeout
      receive_progress = None
      if u'receive_progress' in options:
         option_receive_progress = options[u'receive_progress']
         if type(option_receive_progress) != bool:
            raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
         receive_progress = option_receive_progress
      discloseMe = None
      if u'disclose_me' in options:
         option_discloseMe = options[u'disclose_me']
         if type(option_discloseMe) != bool:
            raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
         discloseMe = option_discloseMe
      obj = Call(request,
                 procedure,
                 args = args,
                 kwargs = kwargs,
                 timeout = timeout,
                 receive_progress = receive_progress,
                 discloseMe = discloseMe)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      ## only options that were explicitly set are serialized
      options = {}
      if self.timeout is not None:
         options[u'timeout'] = self.timeout
      if self.receive_progress is not None:
         options[u'receive_progress'] = self.receive_progress
      if self.discloseMe is not None:
         options[u'disclose_me'] = self.discloseMe
      ## choose the shortest wire format that carries the payload present
      if self.kwargs:
         return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
      elif self.args:
         return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
      else:
         return [Call.MESSAGE_TYPE, self.request, options, self.procedure]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
   """
   A WAMP ``CANCEL`` message.
   Format: ``[CANCEL, CALL.Request|id, Options|dict]``
   """
   MESSAGE_TYPE = 49
   """
   The WAMP message code for this type of message.
   """
   ## supported cancel modes
   SKIP = u'skip'
   ABORT = u'abort'
   KILL = u'kill'
   def __init__(self, request, mode = None):
      """
      :param request: The WAMP request ID of the original `CALL` to cancel.
      :type request: int
      :param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
      :type mode: unicode or None
      """
      assert(type(request) in six.integer_types)
      assert(mode is None or type(mode) == six.text_type)
      assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
      Message.__init__(self)
      self.request = request
      self.mode = mode
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
      if len(wmsg) != 3:
         raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
      request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
      options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
      ## options
      ##
      mode = None
      if u'mode' in options:
         option_mode = options[u'mode']
         if type(option_mode) != six.text_type:
            raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
         if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
            raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
         mode = option_mode
      obj = Cancel(request, mode = mode)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      options = {}
      if self.mode is not None:
         options[u'mode'] = self.mode
      return [Cancel.MESSAGE_TYPE, self.request, options]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      ## FIX: removed stray trailing apostrophe from the format string
      ## (was "mode = '{1}'')", producing an unbalanced quote in the output)
      return "WAMP CANCEL Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Result(Message):
   """
   A WAMP ``RESULT`` message.
   Formats:
   * ``[RESULT, CALL.Request|id, Details|dict]``
   * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
   * ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
   """
   MESSAGE_TYPE = 50
   """
   The WAMP message code for this type of message.
   """
   def __init__(self, request, args = None, kwargs = None, progress = None):
      """
      :param request: The request ID of the original `CALL` request.
      :type request: int
      :param args: Positional values for application-defined event payload.
                   Must be serializable using any serializers in use.
      :type args: list or tuple or None
      :param kwargs: Keyword values for application-defined event payload.
                     Must be serializable using any serializers in use.
      :type kwargs: dict or None
      :param progress: If ``True``, this result is a progressive call result, and subsequent
                       results (or a final error) will follow.
      :type progress: bool or None
      """
      assert(type(request) in six.integer_types)
      assert(args is None or type(args) in [list, tuple])
      assert(kwargs is None or type(kwargs) == dict)
      assert(progress is None or type(progress) == bool)
      Message.__init__(self)
      self.request = request
      self.args = args
      self.kwargs = kwargs
      ## None means "not present on the wire" (final, non-progressive result)
      self.progress = progress
   @staticmethod
   def parse(wmsg):
      """
      Verifies and parses an unserialized raw message into an actual WAMP message instance.
      :param wmsg: The unserialized raw message.
      :type wmsg: list
      :returns: An instance of this class.
      """
      ## this should already be verified by WampSerializer.unserialize
      ##
      assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
      ## 3 = bare, 4 = with args, 5 = with args + kwargs
      if len(wmsg) not in (3, 4, 5):
         raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
      request = check_or_raise_id(wmsg[1], "'request' in RESULT")
      details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
      args = None
      if len(wmsg) > 3:
         args = wmsg[3]
         if type(args) != list:
            raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
      kwargs = None
      if len(wmsg) > 4:
         kwargs = wmsg[4]
         if type(kwargs) != dict:
            raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
      ## the only recognized detail is the (optional) progressive-result flag
      progress = None
      if u'progress' in details:
         detail_progress = details[u'progress']
         if type(detail_progress) != bool:
            raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
         progress = detail_progress
      obj = Result(request, args = args, kwargs = kwargs, progress = progress)
      return obj
   def marshal(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
      """
      details = {}
      if self.progress is not None:
         details[u'progress'] = self.progress
      ## choose the shortest wire format that carries the payload present
      if self.kwargs:
         return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
      elif self.args:
         return [Result.MESSAGE_TYPE, self.request, details, self.args]
      else:
         return [Result.MESSAGE_TYPE, self.request, details]
   def __str__(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
      """
      return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
    """
    A WAMP ``REGISTER`` message.

    Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
    """

    MESSAGE_TYPE = 64
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param procedure: The WAMP or application URI of the RPC endpoint provided.
        :type procedure: unicode
        :param pkeys: The endpoint can work for this list of application partition keys.
        :type pkeys: list of int or None
        :param discloseCaller: If ``True``, the (registering) callee requests to disclose
           the identity of callers whenever called.
        :type discloseCaller: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(type(procedure) == six.text_type)
        assert(pkeys is None or type(pkeys) == list)
        if pkeys:
            for pkey in pkeys:
                assert(type(pkey) in six.integer_types)
        assert(discloseCaller is None or type(discloseCaller) == bool)

        Message.__init__(self)
        self.request = request
        self.procedure = procedure
        self.pkeys = pkeys
        self.discloseCaller = discloseCaller

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)

        if len(wmsg) != 4:
            raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
        options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
        procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")

        pkeys = None
        if u'pkeys' in options:
            pkeys = options[u'pkeys']
            if type(pkeys) != list:
                raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(pkeys)))
            for pkey in pkeys:
                if type(pkey) not in six.integer_types:
                    raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pkey)))

        discloseCaller = None
        if u'disclose_caller' in options:
            discloseCaller = options[u'disclose_caller']
            if type(discloseCaller) != bool:
                raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(discloseCaller)))

        return Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        ## only include options that were actually set
        options = {
            key: value
            for key, value in ((u'pkeys', self.pkeys),
                               (u'disclose_caller', self.discloseCaller))
            if value is not None
        }
        return [Register.MESSAGE_TYPE, self.request, options, self.procedure]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(
            self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
    """
    A WAMP ``REGISTERED`` message.

    Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
    """

    MESSAGE_TYPE = 65
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The request ID of the original ``REGISTER`` request.
        :type request: int
        :param registration: The registration ID for the registered procedure (or procedure pattern).
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))

        return Registered(check_or_raise_id(wmsg[1], "'request' in REGISTERED"),
                          check_or_raise_id(wmsg[2], "'registration' in REGISTERED"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Registered.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(
            self.request, self.registration)
class Unregister(Message):
    """
    A WAMP `UNREGISTER` message.

    Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
    """

    MESSAGE_TYPE = 66
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, registration):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID for the registration to unregister.
        :type registration: int
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)

        Message.__init__(self)
        self.request = request
        self.registration = registration

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))

        return Unregister(check_or_raise_id(wmsg[1], "'request' in UNREGISTER"),
                          check_or_raise_id(wmsg[2], "'registration' in UNREGISTER"))

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregister.MESSAGE_TYPE, self.request, self.registration]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(
            self.request, self.registration)
class Unregistered(Message):
    """
    A WAMP ``UNREGISTERED`` message.

    Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
    """

    MESSAGE_TYPE = 67
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request):
        """
        :param request: The request ID of the original ``UNREGISTER`` request.
        :type request: int
        """
        assert(type(request) in six.integer_types)

        Message.__init__(self)
        self.request = request

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)

        if len(wmsg) != 2:
            ## FIX: error messages previously said "UNREGISTER" although this is
            ## the UNREGISTERED message -- every sibling class labels errors with
            ## its own message type.
            raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")

        obj = Unregistered(request)

        return obj

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        return [Unregistered.MESSAGE_TYPE, self.request]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        ## FIX: was mislabeled "WAMP UNREGISTER Message"
        return "WAMP UNREGISTERED Message (request = {0})".format(self.request)
class Invocation(Message):
    """
    A WAMP ``INVOCATION`` message.

    Formats:

    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
    * ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 68
    """
    The WAMP message code for this type of message.
    """

    def __init__(self,
                 request,
                 registration,
                 args = None,
                 kwargs = None,
                 timeout = None,
                 receive_progress = None,
                 caller = None,
                 authid = None,
                 authrole = None,
                 authmethod = None):
        """
        :param request: The WAMP request ID of this request.
        :type request: int
        :param registration: The registration ID of the endpoint to be invoked.
        :type registration: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param timeout: If present, let the callee automatically cancels
           the invocation after this ms.
        :type timeout: int or None
        :param receive_progress: Indicates if the callee should produce progressive results.
        :type receive_progress: bool or None
        :param caller: The WAMP session ID of the caller.
        :type caller: int or None
        :param authid: The authentication ID of the caller.
        :type authid: unicode or None
        :param authrole: The authentication role of the caller.
        :type authrole: unicode or None
        :param authmethod: The authentication method under which the caller was authenticated.
        :type authmethod: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(type(registration) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(timeout is None or type(timeout) in six.integer_types)
        assert(receive_progress is None or type(receive_progress) == bool)
        assert(caller is None or type(caller) in six.integer_types)
        assert(authid is None or type(authid) == six.text_type)
        assert(authrole is None or type(authrole) == six.text_type)
        assert(authmethod is None or type(authmethod) == six.text_type)

        Message.__init__(self)
        self.request = request
        self.registration = registration
        self.args = args
        self.kwargs = kwargs
        self.timeout = timeout
        self.receive_progress = receive_progress
        self.caller = caller
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)

        if len(wmsg) not in (4, 5, 6):
            raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
        registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
        details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")

        args = None
        if len(wmsg) > 4:
            args = wmsg[4]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))

        kwargs = None
        if len(wmsg) > 5:
            kwargs = wmsg[5]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))

        timeout = None
        if u'timeout' in details:
            timeout = details[u'timeout']
            if type(timeout) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(timeout)))
            if timeout < 0:
                raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(timeout))

        receive_progress = None
        if u'receive_progress' in details:
            receive_progress = details[u'receive_progress']
            if type(receive_progress) != bool:
                raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(receive_progress)))

        caller = None
        if u'caller' in details:
            caller = details[u'caller']
            if type(caller) not in six.integer_types:
                raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(caller)))

        ## the three authentication details share the same unicode-type check
        auth = {}
        for key in (u'authid', u'authrole', u'authmethod'):
            value = None
            if key in details:
                value = details[key]
                if type(value) != six.text_type:
                    raise ProtocolError("invalid type {0} for '{1}' detail in INVOCATION".format(type(value), key))
            auth[key] = value

        return Invocation(request,
                          registration,
                          args = args,
                          kwargs = kwargs,
                          timeout = timeout,
                          receive_progress = receive_progress,
                          caller = caller,
                          authid = auth[u'authid'],
                          authrole = auth[u'authrole'],
                          authmethod = auth[u'authmethod'])

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        for key, value in ((u'timeout', self.timeout),
                           (u'receive_progress', self.receive_progress),
                           (u'caller', self.caller),
                           (u'authid', self.authid),
                           (u'authrole', self.authrole),
                           (u'authmethod', self.authmethod)):
            if value is not None:
                options[key] = value

        msg = [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(
            self.request, self.registration, self.args, self.kwargs, self.timeout,
            self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
    """
    A WAMP ``INTERRUPT`` message.

    Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
    """

    MESSAGE_TYPE = 69
    """
    The WAMP message code for this type of message.
    """

    ABORT = u'abort'
    KILL = u'kill'

    def __init__(self, request, mode = None):
        """
        :param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
        :type request: int
        :param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
        :type mode: unicode or None
        """
        assert(type(request) in six.integer_types)
        assert(mode is None or type(mode) == six.text_type)
        assert(mode is None or mode in [self.ABORT, self.KILL])

        Message.__init__(self)
        self.request = request
        self.mode = mode

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)

        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
        options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")

        ## options
        ##
        mode = None
        if u'mode' in options:
            mode = options[u'mode']
            if type(mode) != six.text_type:
                raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(mode)))
            if mode not in [Interrupt.ABORT, Interrupt.KILL]:
                raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(mode))

        return Interrupt(request, mode = mode)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {} if self.mode is None else {u'mode': self.mode}
        return [Interrupt.MESSAGE_TYPE, self.request, options]

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
    """
    A WAMP ``YIELD`` message.

    Formats:

    * ``[YIELD, INVOCATION.Request|id, Options|dict]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
    * ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
    """

    MESSAGE_TYPE = 70
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, request, args = None, kwargs = None, progress = None):
        """
        :param request: The WAMP request ID of the original call.
        :type request: int
        :param args: Positional values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type args: list or tuple or None
        :param kwargs: Keyword values for application-defined event payload.
           Must be serializable using any serializers in use.
        :type kwargs: dict or None
        :param progress: If ``True``, this result is a progressive invocation result, and subsequent
           results (or a final error) will follow.
        :type progress: bool or None
        """
        assert(type(request) in six.integer_types)
        assert(args is None or type(args) in [list, tuple])
        assert(kwargs is None or type(kwargs) == dict)
        assert(progress is None or type(progress) == bool)

        Message.__init__(self)
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list

        :returns: An instance of this class.
        """
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)

        if len(wmsg) not in (3, 4, 5):
            raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))

        request = check_or_raise_id(wmsg[1], "'request' in YIELD")
        options = check_or_raise_extra(wmsg[2], "'options' in YIELD")

        args = None
        if len(wmsg) > 3:
            args = wmsg[3]
            if type(args) != list:
                raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))

        kwargs = None
        if len(wmsg) > 4:
            kwargs = wmsg[4]
            if type(kwargs) != dict:
                raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))

        progress = None
        if u'progress' in options:
            progress = options[u'progress']
            if type(progress) != bool:
                raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(progress)))

        return Yield(request, args = args, kwargs = kwargs, progress = progress)

    def marshal(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
        """
        options = {}
        if self.progress is not None:
            options[u'progress'] = self.progress

        msg = [Yield.MESSAGE_TYPE, self.request, options]
        if self.kwargs:
            msg.extend([self.args, self.kwargs])
        elif self.args:
            msg.append(self.args)
        return msg

    def __str__(self):
        """
        Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
        """
        return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(
            self.request, self.args, self.kwargs, self.progress)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.